diff --git a/.github/workflows/python.yaml b/.github/workflows/python.yaml index e9232627..3d41f8a4 100644 --- a/.github/workflows/python.yaml +++ b/.github/workflows/python.yaml @@ -8,61 +8,47 @@ on: jobs: Python: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 strategy: fail-fast: false matrix: - python-version: ["3.9", "3.10", "3.11"] - - env: - API_TOKEN: ${{ secrets.TEST_API_TOKEN }} - DATASET_ID: ${{ secrets.DATASET_ID }} - PDF_DATASET_ID: ${{ secrets.PDF_DATASET_ID }} - MODEL_NAME: ${{ secrets.MODEL_NAME }} - WORKFLOW_ID: ${{ secrets.WORKFLOW_ID }} - MODEL_ID: ${{ secrets.MODEL_ID }} - MODEL_GROUP_ID: ${{ secrets.MODEL_GROUP_ID }} - TEACH_TASK_ID: ${{ secrets.TEACH_TASK_ID }} - HOST_URL: try.indico.io - + python-version: ["3.10", "3.11", "3.12", "3.13"] + steps: - name: Checkout Commit - uses: actions/checkout@v3 + uses: actions/checkout@v4 + + - name: Install Poetry + run: pipx install "poetry>=2,<3" - name: Install Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} + cache: "poetry" - name: Install Dependencies run: | - python -m pip install --upgrade pip - python -m pip install -r requirements.txt - python -m pip install -e .[full] - python -m pip install flake8 pytest pytest-cov + poetry env use ${{ matrix.python-version }} + poetry install - - name: Run Tests And Build Coverage File - run: | - pytest --junitxml=pytest.xml --cov-report=term-missing:skip-covered --cov=indico_toolkit tests/ | tee pytest-coverage.txt + - name: Run Black + run: poetry run poe black-check - - name: Pytest Coverage Comment - id: coverageComment - uses: MishaKav/pytest-coverage-comment@main - with: - pytest-coverage-path: ./pytest-coverage.txt - junitxml-path: ./pytest.xml - title: Indico Toolkit Coverage Report - badge-title: Test Coverage - default-branch: main + - name: Run Ruff + run: poetry run poe ruff-check - - name: Check The Output Coverage + - name: Run Mypy + run: 
poetry run poe mypy + + - name: Run Pytest + run: poetry run poe test + + - name: Install Extra Dependencies run: | - echo "Coverage Percentage - ${{ steps.coverageComment.outputs.coverage }}" - echo "Coverage Warnings - ${{ steps.coverageComment.outputs.warnings }}" - echo "Coverage Errors - ${{ steps.coverageComment.outputs.errors }}" - echo "Coverage Failures - ${{ steps.coverageComment.outputs.failures }}" - echo "Coverage Skipped - ${{ steps.coverageComment.outputs.skipped }}" - echo "Coverage Tests - ${{ steps.coverageComment.outputs.tests }}" - echo "Coverage Time - ${{ steps.coverageComment.outputs.time }}" - echo "Not Success Test Info - ${{ steps.coverageComment.outputs.notSuccessTestInfo }}" + poetry env use ${{ matrix.python-version }} + poetry install --all-extras + + - name: Run Pytest on Extras + run: poetry run poe test diff --git a/CHANGELOG.md b/CHANGELOG.md index 01fe20ba..e268c7ff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -141,9 +141,15 @@ This is the first major version release tested to work on Indico 6.X. * Small but important fix to add original filename to the workflow result object - ## 6.1.0 5/6/24 ### Removed * Removed staggered loop support and removed highlighting support. + +## 6.14.0 3/10/25 + +* Added `results` module. +* Added `etloutput` module. +* Refactored `retry` decorator with asyncio support. +* Switched to Poetry for packaging and dependency management. diff --git a/README.md b/README.md index cc45db07..314b9b87 100644 --- a/README.md +++ b/README.md @@ -1,84 +1,109 @@ -# Indico-Toolkit +# Indico Toolkit -A library to assist Indico IPA development +**This repository contains software that is not officially supported by Indico. It may + be outdated or contain bugs. The operations it performs are potentially destructive. + Use at your own risk.** -### Available Functionality +Classes, functions, and abstractions for building workflows using the Indico IPA +(Intelligent Process Automation) platform. 
-The indico-toolkit provides classes and functions to help achieve the following: +- [Polling Classes](https://github.com/IndicoDataSolutions/indico-toolkit-python/tree/main/indico_toolkit/polling/__init__.py) + that implement best-practices polling behavior for Auto Review and Downstream + processes. Easily plug in business logic without the boilerplate. +- [Result File](https://github.com/IndicoDataSolutions/indico-toolkit-python/blob/main/indico_toolkit/results/__init__.py) + and [Etl Output](https://github.com/IndicoDataSolutions/indico-toolkit-python/blob/main/indico_toolkit/etloutput/__init__.py) + Data Classes that parse standard IPA JSON output into idiomatic, type-safe Python dataclasses. +- [Metrics Classes](https://github.com/IndicoDataSolutions/indico-toolkit-python/blob/main/indico_toolkit/metrics/__init__.py) + to compare model performance, evaluate ground truth, and plot statistics. +- [Snapshot Classes](https://github.com/IndicoDataSolutions/indico-toolkit-python/blob/main/indico_toolkit/snapshots/snapshot.py) + to concatenate, merge, filter, and manipulate snapshot CSVs. -* Easy batch workflow submission and retrieval. -* Classes that simplify dataset/doc-extraction functionality. -* Tools to assist with positioning, e.g. row association, distance between preds, relative position validation. -* Tools to assist with creating and copying workflow structures. -* Get metrics for all model IDs in a model group to see how well fields are performing after more labeling. -* Compare two models via bar plot and data tables. -* Train a document classification model without labeling. -* An AutoReview class to assist with automated acceptance/rejection of model predictions. -* Common manipulation of prediction/workflow results. -* Objects to simplify parsing OCR responses. -* Snapshot merging and manipulation +...and more in the [Examples](https://github.com/IndicoDataSolutions/indico-toolkit-python/tree/main/examples) folder. 
-### Installation +## Installation + +**Indico Toolkit does not use semantic versioning.** + +Indico Toolkit versions match the minimum IPA version required to use its functionality. +E.g. `indico-toolkit==6.14.0` makes use of functionality introduced in IPA 6.14, and +some functionality requires IPA 6.14 or later to use. + +```bash +pip install indico-toolkit ``` -pip install indico_toolkit + +Some functionality requires optional dependencies that can be installed with extras. + +```bash +pip install 'indico-toolkit[all]' +pip install 'indico-toolkit[downloads]' +pip install 'indico-toolkit[examples]' +pip install 'indico-toolkit[metrics]' +pip install 'indico-toolkit[predictions]' +pip install 'indico-toolkit[snapshots]' ``` -* Note: if you are on Indico 6.X, install an indico_toolkit 6.X version. If you're on 5.X install a 2.X version. -* Note: If you are on a version of the Indico IPA platform pre-5.1, then install indico-toolkit==1.2.3. -### Example Useage +## Contributing -For scripted examples on how to use the toolkit, see the [examples directory](https://github.com/IndicoDataSolutions/Indico-Solutions-Toolkit/tree/main/examples) +Indico Toolkit uses Poetry 2.X for package and dependency management. -### Tests -To run the test suite you will need to set the following environment variables: HOST_URL, API_TOKEN_PATH. -You can also set WORKFLOW_ID (workflow w/ single extraction model), MODEL_NAME (extraction model name) -and DATASET_ID (uploaded dataset). If you don't set these 3 env variables, test configuration will -upload a dataset and create a workflow. +### Setup -``` -pytest +Clone the source repository with Git. + +```bash +git clone git@github.com:IndicoDataSolutions/indico-toolkit-python.git ``` -### Example +Install dependencies with Poetry. 
-How to get prediction results and write the results to CSV +```bash +poetry install +``` + +Formatting, linting, type checking, and tests are defined as +[Poe](https://poethepoet.natn.io/) tasks in `pyproject.toml`. +```bash +poetry run poe {format,check,test,all} ``` -from indico_toolkit.indico_wrapper import Workflow -from indico_toolkit.pipelines import FileProcessing -from indico_toolkit import create_client -WORKFLOW_ID = 1418 -HOST = "app.indico.io" -API_TOKEN_PATH = "./indico_api_token.txt" +Code changes or additions should pass `poetry run poe all` before opening a PR. + + +### Tests + +Indico Toolkit has three test suites: required unit tests, extra unit tests, and +integration tests. -# Instantiate the workflow class -client = create_client(HOST, API_TOKEN_PATH) -wflow = Workflow(client) +By default, only required unit tests are executed. Extra unit tests and integration +tests are skipped. -# Collect files to submit -fp = FileProcessing() -fp.get_file_paths_from_dir("./datasets/disclosures/") +```bash +poetry run poe {test,all} +``` -# Submit documents, await the results and write the results to CSV in batches of 10 -for paths in fp.batch_files(batch_size=10): - submission_ids = wflow.submit_documents_to_workflow(WORKFLOW_ID, paths) - submission_results = wflow.get_submission_results_from_ids(submission_ids) - for filename, result in zip(paths, submission_results): - result.predictions.to_csv("./results.csv", filename=filename, append_if_exists=True) +Extra unit tests are skipped when their dependencies are not installed. To execute extra +unit tests, install one or more extras and run the tests. +```bash +poetry install --all-extras +poetry run poe {test,all} ``` -### Contributing +Integration tests make API calls to an IPA environment and require a host and API token +to execute. These tests create datasets, setup workflows, and train models. 
**Expect +them to take tens of minutes to run.** -If you are adding new features to Indico Toolkit, make sure to: +```bash +poetry run poe test-integration \ + --host try.indico.io \ + --token indico_api_token.txt +``` -* Add robust integration and unit tests. -* Add a sample usage script to the 'examples/' directory. -* Add a bullet point for what the feature does to the list at the top of this README.md. -* Ensure the full test suite is passing locally before creating a pull request. -* Add doc strings for methods where usage is non-obvious. -* If you are using new pip installed libraries, make sure they are added to the setup.py and pyproject.toml. +Make liberal use of pytest's `--last-failed` and `--failed-first` +[flags](https://docs.pytest.org/en/stable/how-to/cache.html) to speed up integration +test execution when writing code. diff --git a/examples/auto_review_predictions.py b/examples/auto_review_predictions.py index a9c11d77..6fcd7655 100644 --- a/examples/auto_review_predictions.py +++ b/examples/auto_review_predictions.py @@ -1,17 +1,14 @@ """ Submit documents to a workflow, auto review them and submit them for human review """ -from indico_toolkit.auto_review import ( - AutoReviewFunction, - AutoReviewer, -) + +from indico_toolkit import create_client +from indico_toolkit.auto_review import AutoReviewer, AutoReviewFunction from indico_toolkit.auto_review.auto_review_functions import ( + accept_by_confidence, remove_by_confidence, - accept_by_confidence ) from indico_toolkit.indico_wrapper import Workflow -from indico_toolkit import create_client - WORKFLOW_ID = 1234 HOST = "app.indico.io" @@ -29,6 +26,7 @@ wf_results = wflow.get_submission_results_from_ids(submission_ids) predictions = wf_results[0].predictions.to_list() + # Set up custom review function def custom_function(predictions, labels: list = None, match_text: str = ""): for pred in predictions: @@ -39,9 +37,13 @@ def custom_function(predictions, labels: list = None, match_text: str = ""): # 
Set up review functions and review predictions functions = [ - AutoReviewFunction(remove_by_confidence, kwargs={"conf_threshold": 0.90}), # will default to all labels if labels is not provided + AutoReviewFunction( + remove_by_confidence, kwargs={"conf_threshold": 0.90} + ), # will default to all labels if labels is not provided AutoReviewFunction(accept_by_confidence, labels=["Name", "Amount"]), - AutoReviewFunction(custom_function, kwargs={"match_text": "text to match"}) # call custom auto review function + AutoReviewFunction( + custom_function, kwargs={"match_text": "text to match"} + ), # call custom auto review function ] auto_reviewer = AutoReviewer(predictions, functions) auto_reviewer.apply_reviews() @@ -50,4 +52,3 @@ def custom_function(predictions, labels: list = None, match_text: str = ""): wflow.submit_submission_review( submission_ids[0], {MODEL_NAME: auto_reviewer.updated_predictions} ) - diff --git a/examples/copy_teach_task.py b/examples/copy_teach_task.py index 15d30c30..d0b7f459 100644 --- a/examples/copy_teach_task.py +++ b/examples/copy_teach_task.py @@ -16,5 +16,5 @@ new_workflow = auto_populator.copy_teach_task( dataset_id=DATASET_ID, teach_task_id=TEACH_TASK_ID, - workflow_name=f"Copied Workflow", + workflow_name="Copied Workflow", ) diff --git a/examples/create_auto_classification_workflow.py b/examples/create_auto_classification_workflow.py index a568020f..355adeb4 100644 --- a/examples/create_auto_classification_workflow.py +++ b/examples/create_auto_classification_workflow.py @@ -2,9 +2,8 @@ from indico_toolkit.auto_populate import AutoPopulator """ -Create an Indico Classification Workflow without any labeling -using an organized directory/folder structure. Each folder/directory should contain only one file -type. +Create an Indico Classification Workflow without any labeling using an organized +directory/folder structure. Each folder/directory should contain only one file type. 
For example, you would target '/base_directory/' if you had your files organized like: @@ -25,4 +24,4 @@ "My dataset", "My workflow", "My teach task", -) \ No newline at end of file +) diff --git a/examples/create_full_structure.py b/examples/create_full_structure.py index c022f211..25f0d4eb 100644 --- a/examples/create_full_structure.py +++ b/examples/create_full_structure.py @@ -24,7 +24,7 @@ files_to_upload=["./path_to_file"], read_api=True, single_column=False, - **optional_ocr_options + **optional_ocr_options, ) # creates workflow diff --git a/examples/dataset_tasks.py b/examples/dataset_tasks.py index dd11a8e1..a53a2196 100644 --- a/examples/dataset_tasks.py +++ b/examples/dataset_tasks.py @@ -1,6 +1,6 @@ +from indico_toolkit import create_client from indico_toolkit.indico_wrapper import Datasets, Download from indico_toolkit.pipelines import FileProcessing -from indico_toolkit import create_client DATASET_ID = 1234 HOST = "app.indico.io" @@ -23,4 +23,3 @@ for paths in fp.batch_files(batch_size=2): datasets.add_files_to_dataset(paths) print(f"Uploaded {len(paths)} files") - diff --git a/examples/ground_truth_metrics.py b/examples/ground_truth_metrics.py index 84e8bf78..bf1dc1a9 100644 --- a/examples/ground_truth_metrics.py +++ b/examples/ground_truth_metrics.py @@ -1,13 +1,15 @@ """ Compare a snapshot containing ground truth to a snapshot containing model predictions """ + import pandas as pd from indico_toolkit.metrics import CompareGroundTruth """ -Example 1: GT and MODEL PREDICTIONS LISTS FOR A SINGLE DOCUMENT: Say you have the lists of prediction dictionaries for the ground truth -and the model predictions for a single document. Get the metrics for each label as well as metrics for the overall document. +Example 1: GT and MODEL PREDICTIONS LISTS FOR A SINGLE DOCUMENT: Say you have the lists +of prediction dictionaries for the ground truth and the model predictions for a single +document. 
Get the metrics for each label as well as metrics for the overall document. """ # Replace with your ground truth and model prediction list of dictionaries. @@ -56,7 +58,9 @@ print(cgt_instance.overall_metrics) """ -Example 2: MULTIPLE DOCS FROM GT SNAPSHOT & MODEL PREDS SNAPSHOT: Say you have the ground truth and the model predictions for a set of documents in snapshot form. Write to disk a merged snapshot with resulting metrics for each document. +Example 2: MULTIPLE DOCS FROM GT SNAPSHOT & MODEL PREDS SNAPSHOT: Say you have the +ground truth and the model predictions for a set of documents in snapshot form. Write +to disk a merged snapshot with resulting metrics for each document. """ # Add in your pathways to your ground truth and model pred snapshot csv's preds_df = pd.read_csv("./example_snapshot_predictions.csv") @@ -69,7 +73,8 @@ all_label_metrics_lst = [] overall_label_metrics_lst = [] -# For each document, pull out the ground truth and predictions, instantiate the CGT class, and print out the metrics for each document +# For each document, pull out the ground truth and predictions, instantiate the CGT +# class, and print out the metrics for each document for ind in gt_and_preds_df.index: ground_truth = eval(gt_and_preds_df["Ground_Truth"][ind]) preds = eval(gt_and_preds_df["Predictions"][ind]) diff --git a/examples/manipulate_pdfs.py b/examples/manipulate_pdfs.py index 7a42d960..8294563b 100644 --- a/examples/manipulate_pdfs.py +++ b/examples/manipulate_pdfs.py @@ -1,6 +1,7 @@ """ Given a dataset of PDFs, write some subset of pages from each PDF to new PDFs """ + from indico_toolkit.pipelines import FileProcessing from indico_toolkit.pipelines.pdf_manipulation import ManipulatePDF @@ -14,8 +15,6 @@ for pdf_path in fp: pdf = ManipulatePDF(pdf_path) if pdf.page_count > 8: - new_pdf_path = fp.join_paths( - OUTPUT_DIRECTORY, fp.file_name_from_path(pdf_path) - ) + new_pdf_path = fp.join_paths(OUTPUT_DIRECTORY, fp.file_name_from_path(pdf_path)) 
pdf.write_subset_of_pages(new_pdf_path, PAGES_TO_KEEP) pdf.close_doc() diff --git a/examples/merge_snapshots.py b/examples/merge_snapshots.py index f84e3ebf..6b004d3f 100644 --- a/examples/merge_snapshots.py +++ b/examples/merge_snapshots.py @@ -1,6 +1,6 @@ from indico_toolkit import create_client -from indico_toolkit.snapshots import Snapshot from indico_toolkit.indico_wrapper import Datasets +from indico_toolkit.snapshots import Snapshot HOST = "app.indico.io" API_TOKEN_PATH = "./indico_api_token.txt" @@ -9,8 +9,7 @@ OUTPUT_PATH = "./merged_snapshot_output.csv" """ -EXAMPLE 1: -Merge the labels from two downloaded teach task snapshots on the same files. +EXAMPLE 1: Merge the labels from two downloaded teach task snapshots on the same files. Example usage: if you labeled different fields for the same documents in separate tasks. """ main_snap = Snapshot(PATH_TO_SNAPSHOT) @@ -18,13 +17,13 @@ main_snap.standardize_column_names() snap_to_merge.standardize_column_names() main_snap.merge_by_file_name(snap_to_merge, ensure_identical_text=True) -print(main_snap.get_all_labeled_text("Company Name")) # see what text was captured for any label +# see what text was captured for any label +print(main_snap.get_all_labeled_text("Company Name")) main_snap.to_csv(OUTPUT_PATH, only_keep_key_columns=True) """ -EXAMPLE 2: -Combine two identically labeled snapshots together, i.e. 
stacked on atop the other -Example usage: if you labeled two sets of documents with the same labels in separate teach tasks +EXAMPLE 2: Combine two identically labeled snapshots together +Example usage: if you labeled different documents with the same labels in separate tasks """ main_snap = Snapshot(PATH_TO_SNAPSHOT) print(main_snap.number_of_samples) @@ -32,7 +31,8 @@ main_snap.standardize_column_names() snap_to_append.standardize_column_names() main_snap.append(snap_to_append) -print(main_snap.number_of_samples) # will now include all of the samples from snap_to_append as well +# will now include all of the samples from snap_to_append as well +print(main_snap.number_of_samples) main_snap.to_csv(OUTPUT_PATH) """ @@ -49,4 +49,4 @@ target_col=main_snap.label_col, wait=False, ) -print(f"My Model Group ID is {model.id}") \ No newline at end of file +print(f"My Model Group ID is {model.id}") diff --git a/examples/model_metrics.py b/examples/model_metrics.py index b09f5d38..b5abf08b 100644 --- a/examples/model_metrics.py +++ b/examples/model_metrics.py @@ -1,8 +1,9 @@ """ Get extraction field metrics for all Model IDs in a Model Group """ -from indico_toolkit.metrics import ExtractionMetrics, CompareModels + from indico_toolkit import create_client +from indico_toolkit.metrics import CompareModels, ExtractionMetrics MODEL_GROUP_ID = 73 HOST = "app.indico.io" @@ -11,8 +12,8 @@ client = create_client(HOST, API_TOKEN_PATH) """ -Example 1: Explore and compare performance for all models in a Model Group to, for example, see improvement -over time. +Example 1: Explore and compare performance for all models in a Model Group to, for +example, see improvement over time. 
""" metrics = ExtractionMetrics(client) metrics.get_metrics(MODEL_GROUP_ID) diff --git a/examples/pdf_highlighter.py b/examples/pdf_highlighter.py index b6445017..3540205b 100644 --- a/examples/pdf_highlighter.py +++ b/examples/pdf_highlighter.py @@ -1,9 +1,10 @@ """ Highlight Indico Extraction Predictions on the source PDF """ -from indico_toolkit.indico_wrapper import Workflow -from indico_toolkit.highlighter import Highlighter + from indico_toolkit import create_client +from indico_toolkit.highlighter import Highlighter +from indico_toolkit.indico_wrapper import Workflow WORKFLOW_ID = 1418 HOST = "app.indico.io" @@ -21,12 +22,10 @@ # Highlight Predictions onto source document and write it to disc highlighter = Highlighter(submission_result.predictions, PATH_TO_DOCUMENT) highlighter.collect_tokens(ocr_object.token_objects) -highlighter.highlight_pdf( - "./highlighted_doc.pdf", ocr_object.page_heights_and_widths -) +highlighter.highlight_pdf("./highlighted_doc.pdf", ocr_object.page_heights_and_widths) -# You can also have unique color highlights for each label group, write the label above the highlight, -# and add bookmarks of what labels appear on which pages +# You can also have unique color highlights for each label group, write the label above +# the highlight, and add bookmarks of what labels appear on which pages highlighter.highlight_pdf( "./highlighted_doc.pdf", ocr_object.page_heights_and_widths, @@ -34,4 +33,3 @@ add_label_annotations=True, add_bookmarks=True, ) - diff --git a/examples/poll_auto_review.py b/examples/poll_auto_review.py index 8a22599b..1ab70c26 100644 --- a/examples/poll_auto_review.py +++ b/examples/poll_auto_review.py @@ -11,9 +11,9 @@ from indico import IndicoConfig -from indico_toolkit.polling import AutoReviewPoller, AutoReviewed from indico_toolkit.etloutput import EtlOutput -from indico_toolkit.results import Result, Document +from indico_toolkit.polling import AutoReviewed, AutoReviewPoller +from indico_toolkit.results import 
Document, Result async def auto_review( diff --git a/examples/results_autoreview.py b/examples/results_autoreview.py index 3017910e..9069d65d 100644 --- a/examples/results_autoreview.py +++ b/examples/results_autoreview.py @@ -1,6 +1,7 @@ """ Minimal auto review example for single-document submissions. """ + from operator import attrgetter from typing import Any diff --git a/examples/results_dataclasses.py b/examples/results_dataclasses.py index 0d84a113..e43f1849 100644 --- a/examples/results_dataclasses.py +++ b/examples/results_dataclasses.py @@ -93,16 +93,25 @@ predictions.form_extractions # List of all form extraction predictions predictions.unbundlings # List of all unbundling predictions -predictions.apply() # Apply a function to all predictions -predictions.groupby() # Group predictions into a dictionary by some attribute (e.g. label) -predictions.orderby() # Sort predictions by some attribute (e.g. confidence) -predictions.where() # Filter predictions by some predicate (e.g. model, label, confidence) -predictions.to_changes(result) # Get this list of predictions as changes for `SubmitReview` - -predictions.extractions.accept() # Accept all extractions in this list (e.g. after filtering) -predictions.extractions.reject() # Reject all extractions in this list (e.g. after filtering) -predictions.extractions.unaccept() # Unaccept all extractions in this list (e.g. after filtering) -predictions.extractions.unreject() # Unreject all extractions in this list (e.g. after filtering) +# Apply a function to all predictions +predictions.apply() +# Group predictions into a dictionary by some attribute (e.g. label) +predictions.groupby() +# Sort predictions by some attribute (e.g. confidence) +predictions.orderby() +# Filter predictions by some predicate (e.g. model, label, confidence) +predictions.where() +# Get this list of predictions as changes for `SubmitReview` +predictions.to_changes(result) + +# Accept all extractions in this list (e.g. 
after filtering) +predictions.extractions.accept() +# Reject all extractions in this list (e.g. after filtering) +predictions.extractions.reject() +# Unaccept all extractions in this list (e.g. after filtering) +predictions.extractions.unaccept() +# Unreject all extractions in this list (e.g. after filtering) +predictions.extractions.unreject() # Prediction Dataclass @@ -112,7 +121,8 @@ prediction.label prediction.confidence # Confidence of the predicted label prediction.confidences # Confidences of all labels -prediction.extras # Other attributes from the result file prediction dict that are not explicitly parsed +# Other attributes from the result file prediction dict that are not explicitly parsed +prediction.extras # Extraction Dataclass (Subclass of Prediction) diff --git a/examples/row_association.py b/examples/row_association.py index 2c7e9345..22189dd7 100644 --- a/examples/row_association.py +++ b/examples/row_association.py @@ -1,33 +1,35 @@ from indico_toolkit.association import LineItems -# Example extraction prediction result from model/workflow (use full list of predictions) +# Example extraction prediction result (use full list of predictions) PREDICTIONS = [ - {"label": "line_date", "start": 12, "end": 18, "text": "1/2/2021"}, - {"label": "line_value", "start": 20, "end": 23, "text": "$12"}, - ] -# Example OCR Token, can be retrieved from workflow result with Workflow.get_ondoc_ocr_from_etl_url + {"label": "line_date", "start": 12, "end": 18, "text": "1/2/2021"}, + {"label": "line_value", "start": 20, "end": 23, "text": "$12"}, +] +# Example OCR Token retrieved from workflow result with +# Workflow.get_ondoc_ocr_from_etl_url OCR_TOKENS = [ - { + { "page_num": 0, "position": { "bbBot": 100, "bbTop": 0, "bbLeft": 423, - "bbRight": 833 - }, + "bbRight": 833, }, - ] + }, +] litems = LineItems( - predictions=PREDICTIONS, - # fields from your model that should be treated as line items - line_item_fields=["line_value", "line_date"], - ) + 
predictions=PREDICTIONS, + # fields from your model that should be treated as line items + line_item_fields=["line_value", "line_date"], +) litems.get_bounding_boxes(ocr_tokens=OCR_TOKENS) -# adds "row_number", page_num, and bounding box metadata to every line_item_fields prediction dictionary +# adds "row_number", page_num, and bounding box metadata to every line_item_fields +# prediction dictionary litems.assign_row_number() # all predictions with added metadata -> List[dict] print(litems.updated_predictions) # only line item predictions grouped together -> List[List[dict]] -print(litems.grouped_line_items) \ No newline at end of file +print(litems.grouped_line_items) diff --git a/examples/submitting_to_doc_extraction.py b/examples/submitting_to_doc_extraction.py index 71b9435d..2bc20cc2 100644 --- a/examples/submitting_to_doc_extraction.py +++ b/examples/submitting_to_doc_extraction.py @@ -1,6 +1,6 @@ +from indico_toolkit import create_client from indico_toolkit.indico_wrapper import DocExtraction from indico_toolkit.pipelines import FileProcessing -from indico_toolkit import create_client """ Retrieves a list of raw full document texts for all files in a folder diff --git a/examples/submitting_to_workflow.py b/examples/submitting_to_workflow.py index 2d9cf2b9..752f4de2 100644 --- a/examples/submitting_to_workflow.py +++ b/examples/submitting_to_workflow.py @@ -1,6 +1,6 @@ +from indico_toolkit import create_client from indico_toolkit.indico_wrapper import Workflow from indico_toolkit.pipelines import FileProcessing -from indico_toolkit import create_client WORKFLOW_ID = 1418 HOST = "app.indico.io" @@ -19,4 +19,6 @@ submission_ids = wflow.submit_documents_to_workflow(WORKFLOW_ID, paths) submission_results = wflow.get_submission_results_from_ids(submission_ids) for filename, result in zip(paths, submission_results): - result.predictions.to_csv("./results.csv", filename=filename, append_if_exists=True) + result.predictions.to_csv( + "./results.csv", 
filename=filename, append_if_exists=True + ) diff --git a/indico_toolkit/__init__.py b/indico_toolkit/__init__.py index 2797debf..2e2772d7 100644 --- a/indico_toolkit/__init__.py +++ b/indico_toolkit/__init__.py @@ -1,5 +1,24 @@ -"""A package to support Indico IPA development""" -__version__ = "6.1.0" +"""Classes, functions, and abstractions for Indico IPA""" -from .errors import * from .client import create_client +from .errors import ( + ToolkitAuthError, + ToolkitError, + ToolkitInputError, + ToolkitInstantiationError, + ToolkitPopulationError, + ToolkitStaggeredLoopError, + ToolkitStatusError, +) + +__all__ = ( + "create_client", + "ToolkitAuthError", + "ToolkitError", + "ToolkitInputError", + "ToolkitInstantiationError", + "ToolkitPopulationError", + "ToolkitStaggeredLoopError", + "ToolkitStatusError", +) +__version__ = "6.14.0" diff --git a/indico_toolkit/association/__init__.py b/indico_toolkit/association/__init__.py index 6797c057..0c700575 100644 --- a/indico_toolkit/association/__init__.py +++ b/indico_toolkit/association/__init__.py @@ -3,3 +3,13 @@ from .line_items import LineItems from .positioning import Positioning from .splitting import split_prediction + +__all__ = ( + "Association", + "ExtractedTokens", + "LineItems", + "Positioning", + "sequences_exact", + "sequences_overlap", + "split_prediction", +) diff --git a/indico_toolkit/association/association.py b/indico_toolkit/association/association.py index 28ed9849..d2348c50 100644 --- a/indico_toolkit/association/association.py +++ b/indico_toolkit/association/association.py @@ -1,7 +1,8 @@ -from typing import Union, List, Dict -from collections import defaultdict from abc import ABC, abstractmethod -from indico_toolkit.types import Extractions +from collections import defaultdict +from typing import Dict, List, Union + +from ..types import Extractions class Association(ABC): diff --git a/indico_toolkit/association/extracted_tokens.py b/indico_toolkit/association/extracted_tokens.py index 
4ea0a22b..6d0c853d 100644 --- a/indico_toolkit/association/extracted_tokens.py +++ b/indico_toolkit/association/extracted_tokens.py @@ -1,29 +1,30 @@ from typing import List, Union -from copy import deepcopy -from .association import sequences_overlap, Association, _check_if_token_match_found -from indico_toolkit.types import Extractions + +from ..types import Extractions +from .association import Association, _check_if_token_match_found, sequences_overlap # TODO: add test that 'errored_predictions' actually works with ValueErrors class ExtractedTokens(Association): """ - Class to collect all extracted tokens, e.g. to enable highlighting of source document + Class to collect all extracted tokens, e.g. to enable highlighting of source + document """ - def __init__( - self, predictions: Union[List[dict], Extractions], - ): + def __init__(self, predictions: Union[List[dict], Extractions]): super().__init__(predictions) def match_pred_to_token(self, pred: dict, ocr_tokens: List[dict], pred_index: int): """ - Append matching token positions to self.mapped_positions, if no matches for pred, raise ValueError + Append matching token positions to self.mapped_positions, if no matches for + pred, raise ValueError Args: pred (dict): Indico extraction model prediction ocr_tokens (List[dict]): List of OCR tokens - pred_index (int): unique number for each prediction so that tokens can be linked to it + pred_index (int): unique number for each prediction so that tokens can be + linked to it Raises: ValueError: No matching token was found @@ -46,21 +47,22 @@ def match_pred_to_token(self, pred: dict, ocr_tokens: List[dict], pred_index: in _check_if_token_match_found(pred, no_match) return match_token_index - def collect_tokens( - self, ocr_tokens: List[dict], raise_for_no_match: bool = True, - ): + def collect_tokens(self, ocr_tokens: List[dict], raise_for_no_match: bool = True): """ Collect all extracted tokens and with pred text and label added to dictionaries Args: ocr_tokens 
(List[dict]): Tokens from 'ondocument' OCR config output - raise_for_no_match (bool): raise exception if a matching token isn't found for a prediction + raise_for_no_match (bool): raise exception if a matching token isn't found for a + prediction """ self._separate_manually_added_predictions() self._predictions = self.sort_predictions_by_start_index(self._predictions) match_index = 0 for pred_ind, pred in enumerate(self._predictions): try: - match_index = self.match_pred_to_token(pred, ocr_tokens[match_index:], pred_ind) + match_index = self.match_pred_to_token( + pred, ocr_tokens[match_index:], pred_ind + ) except ValueError as e: if raise_for_no_match: raise ValueError(e) diff --git a/indico_toolkit/association/line_items.py b/indico_toolkit/association/line_items.py index 77bb0184..550deff9 100644 --- a/indico_toolkit/association/line_items.py +++ b/indico_toolkit/association/line_items.py @@ -1,14 +1,17 @@ """Associate row items""" -from typing import List, Union, Iterable, Dict + from collections import defaultdict from copy import deepcopy -from indico_toolkit.types import Extractions -from .association import sequences_overlap, Association, _check_if_token_match_found +from typing import Iterable, List, Union + +from ..types import Extractions +from .association import Association, _check_if_token_match_found, sequences_overlap class LineItems(Association): """ - Class for associating line items given extraction predictions and ondocument OCR tokens + Class for associating line items given extraction predictions and ondocument OCR + tokens Example Usage: @@ -31,8 +34,8 @@ def __init__( """ Args: predictions (List[dict]): List of extraction predictions - line_item_fields (Iterable[str]): Fields/labels to include as line item values, other values - will not be assigned a row_number. + line_item_fields (Iterable[str]): Fields/labels to include as line item values, + other values will not be assigned a row_number. 
""" self.predictions = self.validate_prediction_formatting(predictions) self.line_item_fields: Iterable[str] = line_item_fields @@ -78,13 +81,16 @@ def match_pred_to_token(pred: dict, ocr_tokens: List[dict]): return match_token_index def get_bounding_boxes( - self, ocr_tokens: List[dict], raise_for_no_match: bool = True, + self, ocr_tokens: List[dict], raise_for_no_match: bool = True ): """ - Adds keys for bounding box top/bottom/left/right and page number to line item predictions + Adds keys for bounding box top/bottom/left/right and page number to line item + predictions + Args: ocr_tokens (List[dict]): Tokens from 'ondocument' OCR config output - raise_for_no_match (bool): raise exception if a matching token isn't found for a prediction + raise_for_no_match (bool): raise exception if a matching token isn't found for a + prediction """ predictions = deepcopy(self.predictions) predictions = self._remove_unneeded_predictions(predictions) @@ -117,7 +123,8 @@ def assign_row_number(self): page_number = starting_pred["page_num"] row_number = 1 for pred in self._mapped_positions: - # if the top of one box equals the bottom of another, we still want a new line + # if the top of one box equals the bottom of another, we still want a new + # line if pred["bbTop"] >= max_bot or pred["page_num"] != page_number: row_number += 1 page_number = pred["page_num"] @@ -129,8 +136,8 @@ def assign_row_number(self): @property def grouped_line_items(self) -> List[List[dict]]: """ - After row number has been assigned to predictions, returns line item predictions as a - list of lists where each list is a row. + After row number has been assigned to predictions, returns line item predictions + as a list of lists where each list is a row. 
""" rows = defaultdict(list) for pred in self._mapped_positions: @@ -139,7 +146,8 @@ def grouped_line_items(self) -> List[List[dict]]: def _remove_unneeded_predictions(self, predictions: List[dict]) -> List[dict]: """ - Remove predictions that are not line item fields or don't have valid start/end index data + Remove predictions that are not line item fields or don't have valid start/end + index data """ valid_line_item_preds = [] for pred in predictions: @@ -159,8 +167,9 @@ def is_line_item_pred(self, pred: dict): def _get_first_valid_line_item_pred(self) -> dict: if len(self._mapped_positions) == 0: - raise Exception( - "Whoops! You have no line_item_fields predictions. Did you run get_bounding_boxes?" + raise RuntimeError( + "Whoops! You have no line_item_fields predictions. " + "Did you run get_bounding_boxes?" ) return self._mapped_positions[0] diff --git a/indico_toolkit/association/positioning.py b/indico_toolkit/association/positioning.py index e50e7dfe..53bca1cc 100644 --- a/indico_toolkit/association/positioning.py +++ b/indico_toolkit/association/positioning.py @@ -1,15 +1,15 @@ from math import sqrt - -from indico_toolkit.errors import ToolkitInputError from typing import List +from ..errors import ToolkitInputError + class Positioning: """ Class to help identify relative positions in a document using bounding box data. - Positions are expected to contain, at a minimum, the following top-level keys: "bbTop", "bbBot", - "bbLeft", "bbRight", "page_num". + Positions are expected to contain, at a minimum, the following top-level + keys: "bbTop", "bbBot", "bbLeft", "bbRight", "page_num". """ def __init__(self): @@ -23,7 +23,8 @@ def positioned_above( Args: above_pos (dict): the position expected to be above below_pos (dict): to position expected to be below - must_be_same_page (bool, optional): required to be on same page. Defaults to True. + must_be_same_page (bool, optional): required to be on same page. Defaults to + True. 
Returns: bool: True if above_pos is above below_pos @@ -44,25 +45,29 @@ def positioned_above( return is_above def positioned_above_overlap( - self, above_pos: dict, below_pos: dict, min_overlap_percent: float = None - ) -> bool: + self, above_pos: dict, below_pos: dict, min_overlap_percent: float = None + ) -> bool: """ - Check if the location of one box is on the same page and above another and if the lower box's overlap is at least the given percentage. + Check if the location of one box is on the same page and above another and if + the lower box's overlap is at least the given percentage. + Args: above_pos (dict): the position expected to be above below_pos (dict): the position expected to be below - min_overlap_percent (float, optional): the minimum amount of overlap needed. Defaults to None. + min_overlap_percent (float, optional): the minimum amount of overlap needed. + Defaults to None. Returns: - bool: True if above_pos is above below_pos and below_pos' amount of overlap is at least min_overlap_percent + bool: True if above_pos is above below_pos and below_pos' amount of overlap + is at least min_overlap_percent """ is_above = False is_min_overlap = True if below_pos["page_num"] != above_pos["page_num"]: - raise ToolkitInputError( - "Predictions are not on the same page!" - ) - if self.xaxis_overlap(above_pos, below_pos) and self.yaxis_above(above_pos, below_pos): + raise ToolkitInputError("Predictions are not on the same page!") + if self.xaxis_overlap(above_pos, below_pos) and self.yaxis_above( + above_pos, below_pos + ): is_above = True overlap_amount = self.get_horizontal_overlap(above_pos, below_pos) if min_overlap_percent and overlap_amount < min_overlap_percent: @@ -77,7 +82,8 @@ def positioned_on_same_level( Args: pos1 (dict): first position pos2 (dict): second position - must_be_same_page (bool, optional): required to be on same page. Defaults to True. + must_be_same_page (bool, optional): required to be on same page. Defaults to + True. 
Returns: bool: True if positions on same level, else False @@ -94,11 +100,13 @@ def get_min_distance( self, pos1: dict, pos2: dict, page_height: int = None ) -> float: """ - Get the minimum distance between any two corners of two bounding boxes via the pythagorean formula. + Get the minimum distance between any two corners of two bounding boxes via the + pythagorean formula. + Args: - page_height (int, optional): If you want to measure distances across pages, set the OCR page height - otherwise locations on separate pages will raise an exception. - Defaults to None. + page_height (int, optional): If you want to measure distances across pages, + set the OCR page height otherwise locations on separate pages will raise + an exception. Defaults to None. Returns: float: minimum distance @@ -138,11 +146,12 @@ def get_horizontal_overlap(self, pos1: dict, pos2: dict) -> float: """ page_difference = abs(pos1["page_num"] - pos2["page_num"]) if page_difference > 0: - raise ToolkitInputError( - "Predictions are not on the same page!" - ) + raise ToolkitInputError("Predictions are not on the same page!") if self.xaxis_overlap(pos1, pos2): - horizontal_overlap_distance = abs(max(pos1["bbLeft"], pos2["bbLeft"]) - min(pos1["bbRight"], pos2["bbRight"])) + horizontal_overlap_distance = abs( + max(pos1["bbLeft"], pos2["bbLeft"]) + - min(pos1["bbRight"], pos2["bbRight"]) + ) position_width = abs(pos2["bbLeft"] - pos2["bbRight"]) return horizontal_overlap_distance / position_width else: @@ -156,17 +165,19 @@ def get_vertical_overlap(self, pos1: dict, pos2: dict) -> float: """ page_difference = abs(pos1["page_num"] - pos2["page_num"]) if page_difference > 0: - raise ToolkitInputError( - "Predictions are not on the same page!" 
- ) + raise ToolkitInputError("Predictions are not on the same page!") if self.yaxis_overlap(pos1, pos2): - vertical_overlap_distance = abs(max(pos1["bbTop"], pos2["bbTop"]) - min(pos1["bbBot"], pos2["bbBot"])) + vertical_overlap_distance = abs( + max(pos1["bbTop"], pos2["bbTop"]) - min(pos1["bbBot"], pos2["bbBot"]) + ) position_height = abs(pos2["bbTop"] - pos2["bbBot"]) return vertical_overlap_distance / position_height else: return 0.0 - - def get_tokens_within_bounds(self, bbox: dict, ocr_tokens: List[dict], include_overlap: bool=False) -> List[dict]: + + def get_tokens_within_bounds( + self, bbox: dict, ocr_tokens: List[dict], include_overlap: bool = False + ) -> List[dict]: """ Args: bbox (dict): dict with target box dimensions and page number @@ -178,25 +189,35 @@ def get_tokens_within_bounds(self, bbox: dict, ocr_tokens: List[dict], include_o page_num: int } ocr_tokens (List[dict]): on-doc OCR token output from raw or OnDoc class - include_overlap (bool, optional): Determines whether to include tokens partially inside bbox. Defaults to False. + include_overlap (bool, optional): Determines whether to include tokens + partially inside bbox. Defaults to False. 
+ Returns: List[dict]: list of OCR tokens that fall within the specified bounding box """ if "position" not in ocr_tokens[0] or "page_num" not in ocr_tokens[0]: raise ToolkitInputError( - "Token list argument is missing required key(s): page_num and/or position" + "Token list argument is missing required key(s): " + "page_num and/or position" ) - if include_overlap == True: - return [token for token in ocr_tokens if - self.on_same_page(bbox, token) - and self.yaxis_overlap(bbox, token["position"]) and self.xaxis_overlap(bbox, token["position"])] + if include_overlap: + return [ + token + for token in ocr_tokens + if self.on_same_page(bbox, token) + and self.yaxis_overlap(bbox, token["position"]) + and self.xaxis_overlap(bbox, token["position"]) + ] else: - return [token for token in ocr_tokens if - self.on_same_page(bbox, token) - and token["position"]["bbLeft"] > bbox["bbLeft"] - and token["position"]["bbRight"] < bbox["bbRight"] - and token["position"]["bbTop"] > bbox["bbTop"] - and token["position"]["bbBot"] < bbox["bbBot"]] + return [ + token + for token in ocr_tokens + if self.on_same_page(bbox, token) + and token["position"]["bbLeft"] > bbox["bbLeft"] + and token["position"]["bbRight"] < bbox["bbRight"] + and token["position"]["bbTop"] > bbox["bbTop"] + and token["position"]["bbBot"] < bbox["bbBot"] + ] @staticmethod def get_vertical_min_distance( @@ -204,12 +225,13 @@ def get_vertical_min_distance( ) -> float: """ Get the vertical minimum distance between two bounding boxes + Args: above_pos (dict): the position expected to be above below_pos (dict): to position expected to be below - page_height (int, optional): If you want to measure distances across pages, set the OCR page height - otherwise locations on separate pages will raise an exception. - Defaults to None. + page_height (int, optional): If you want to measure distances across pages, + set the OCR page height otherwise locations on separate pages will raise + an exception. Defaults to None. 
Returns: float: minimum distance @@ -233,6 +255,7 @@ def get_vertical_min_distance( def get_horizontal_min_distance(pos1: dict, pos2: dict) -> float: """ Get the horizontal minimum distance between two bounding boxes + Returns: float: minimum distance """ @@ -241,7 +264,7 @@ def get_horizontal_min_distance(pos1: dict, pos2: dict) -> float: raise ToolkitInputError( "Predictions are not on the same page! Must enter a page height" ) - + min_distance_1 = abs(pos1["bbLeft"] - pos2["bbRight"]) min_distance_2 = abs(pos1["bbRight"] - pos2["bbLeft"]) return min(min_distance_1, min_distance_2) diff --git a/indico_toolkit/auto_populate/__init__.py b/indico_toolkit/auto_populate/__init__.py index 3ba5ac5e..c3330c10 100644 --- a/indico_toolkit/auto_populate/__init__.py +++ b/indico_toolkit/auto_populate/__init__.py @@ -1 +1,3 @@ -from .populator import AutoPopulator \ No newline at end of file +from .populator import AutoPopulator + +__all__ = ("AutoPopulator",) diff --git a/indico_toolkit/auto_populate/populator.py b/indico_toolkit/auto_populate/populator.py index 25832900..9b2e6396 100644 --- a/indico_toolkit/auto_populate/populator.py +++ b/indico_toolkit/auto_populate/populator.py @@ -1,23 +1,28 @@ -import time import dataclasses -import pandas as pd +import time from json import loads from os import PathLike from pathlib import Path -from typing import List, Dict, Tuple, Union +from typing import Dict, List, Tuple, Union + from indico import IndicoClient -from indico.types import Workflow from indico.queries import ( CreateExport, DownloadExport, GetDataset, - GetWorkflow, GetModelGroup, ) -from indico_toolkit.errors import ToolkitPopulationError -from indico_toolkit.structure.create_structure import Structure +from indico.types import Workflow -from .types import * +from ..errors import ToolkitPopulationError +from ..structure.create_structure import Structure +from .types import ( + Example, + ExampleList, + LabelInput, + LabelInst, + TokenSpanInput, +) class 
AutoPopulator: @@ -60,11 +65,11 @@ def create_auto_classification_workflow( ) -> Workflow: """ Label and train a model based on a directory structure or existing teach task. - You should have a base directory containing sub directories where each directory contains - a unique file type and only that file type. + You should have a base directory containing sub directories where each + directory contains a unique file type and only that file type. Example: - base_directory/ -> Instantiate with the page to 'base_directory' as your 'directory_path' + base_directory/ base_directory/invoices/ -> folder containing only invoices base_directory/disclosures/ -> folder containing only disclosures etc. etc. @@ -73,7 +78,7 @@ def create_auto_classification_workflow( dataset_name (str): Name of created dataset worlflow_name (str): Name of created workflow teach_task_name (str): Name of created teach task - accepted_types (Tuple[str], optional): List of accepted file types to search use + accepted_types (Tuple[str], optional): List of accepted file types to search Returns: Workflow: a Workflow object representation of the newly created workflow """ @@ -90,7 +95,8 @@ def valid_file(file: Path) -> bool: if len(classes) < 2: raise ToolkitPopulationError( - f"You must have documents in at least 2 directories, you only have {len(classes)}" + "You must have documents in at least 2 directories, " + f"you only have {len(classes)}" ) # Upload files to a new dataset. @@ -135,18 +141,24 @@ def copy_teach_task( workflow_name: str, data_column: str = "document", rename_labels: Dict[str, str] = None, - remove_labels: List[str] = None + remove_labels: List[str] = None, ) -> Workflow: """ Create duplicate teach task in same Indico platform. 
+ Note: Does not work with datasets created with a snapshot + Args: dataset_id (int): The dataset id of the dataset you wish to copy - teach_task_id (int): The teach task id of the corresponding teach task to the dataset + teach_task_id (int): The teach task id of the corresponding teach task to + the dataset workflow_name (string): The name of the newly created workflow - data_column_id (str, optional): The datacolumn id of the corresponding dataset. Defaults to 'document' - rename_labels (dict, optional): Dictionary in format {old_label_name : new_label_name} + data_column_id (str, optional): The datacolumn id of the corresponding + dataset. Defaults to 'document' + rename_labels (dict, optional): Dictionary in format + {old_label_name : new_label_name} remove_labels (list, optional): List of labels to remove from old teach task + Returns: Workflow: a Workflow object representation of the newly created workflow """ @@ -199,13 +211,15 @@ def copy_teach_task( old_example_id = row[0] old_examples = self._get_example_list(old_model_group_id) targets_list = loads(row[2])["targets"] - file_to_targets[old_examples.get_example(old_example_id).data_file_name] = targets_list + file_to_targets[old_examples.get_example(old_example_id).data_file_name] = ( + targets_list + ) labels = self.get_labels_by_filename( new_model_group_id, file_to_targets, new_target_name_map, rename_labels, - remove_labels + remove_labels, ) # Label new teach task result = self.structure.label_teach_task( @@ -213,7 +227,7 @@ def copy_teach_task( labels=[dataclasses.asdict(label) for label in labels], model_group_id=new_model_group_id, ) - if result["submitLabelsV2"]["success"] == False: + if not result["submitLabelsV2"]["success"]: raise ToolkitPopulationError("Error: Failed to submit labels") return workflow @@ -223,7 +237,7 @@ def inject_labels_into_teach_task( teach_task_id: int, file_to_targets: dict, rename_labels: Dict[str, str] = None, - remove_labels: List[str] = None + remove_labels: 
List[str] = None, ): """ Add label data into existing teach task @@ -231,24 +245,22 @@ def inject_labels_into_teach_task( Args: workflow_id (int): Id of the workflow you wish to add labels to teach_task_id (int): Id of the corresponding teach task to the workflow - file_to_targets (dict): mapping of filenames to target label data - rename_labels (dict, optional): Dictionary in format {old_label_name : new_label_name} + file_to_targets (dict): mapping of filenames to target label data + rename_labels (dict, optional): Dictionary in format + {old_label_name : new_label_name} remove_labels (list, optional): List of labels to remove from old teach task """ - workflow = GetWorkflow(workflow_id) ( labelset_id, model_group_id, target_name_map, - ) = self._get_teach_task_details( - teach_task_id - ) + ) = self._get_teach_task_details(teach_task_id) labels = self.get_labels_by_filename( model_group_id, file_to_targets, target_name_map, rename_labels, - remove_labels + remove_labels, ) # Label new teach task result = self.structure.label_teach_task( @@ -256,26 +268,28 @@ def inject_labels_into_teach_task( labels=[dataclasses.asdict(label) for label in labels], model_group_id=model_group_id, ) - if result["submitLabelsV2"]["success"] == False: + if not result["submitLabelsV2"]["success"]: raise ToolkitPopulationError("Error: Failed to submit labels") - + def get_labels_by_filename( self, model_group_id: int, file_to_targets: dict, target_name_map: dict, rename_labels: Dict[str, str] = None, - remove_labels: List[str] = None + remove_labels: List[str] = None, ) -> List[LabelInput]: """ Args: model_group_id (int): ID of the model group to be labeled file_to_targets (dict): mapping in the format {filename : targets_list} target_name_map (dict): mapping of field name to corresponding target ID - rename_labels (dict, optional): Dictionary in format {old_label_name : new_label_name} + rename_labels (dict, optional): Dictionary in format + {old_label_name : new_label_name} 
remove_labels (list, optional): List of labels to remove from old teach task + Returns: - A list of LabelInput to be ingested by the platform via submitLabelsV2 + A list of LabelInput to be ingested by the platform via submitLabelsV2 """ labels = [] # Retrieve examples and match against filename @@ -286,16 +300,17 @@ def get_labels_by_filename( targets_list = self._edit_labels( targets_list, rename_labels, remove_labels ) - targets_list = self._convert_label( - targets_list, target_name_map - ) + targets_list = self._convert_label(targets_list, target_name_map) example_id = examples.get_example_id(filename) if example_id: labels.append(LabelInput(example_id, targets_list)) return labels def _edit_labels( - self, targets_list: List[dict], rename_labels: Dict[str, str], remove_labels: List[str] + self, + targets_list: List[dict], + rename_labels: Dict[str, str], + remove_labels: List[str], ): new_targets_list = [] for target in targets_list: @@ -304,7 +319,7 @@ def _edit_labels( target["label"] = rename_labels[target["label"]] new_targets_list.append(target) return new_targets_list - + def _convert_label( self, targets_list: List[dict], target_name_map: dict ) -> List[LabelInst]: @@ -313,9 +328,7 @@ def _convert_label( updated_label = LabelInst(target_name_map[target["label"]]) if target.get("spans"): updated_spans = [ - TokenSpanInput( - span["start"], span["end"], span["page_num"] - ) + TokenSpanInput(span["start"], span["end"], span["page_num"]) for span in target["spans"] ] updated_label.spans = updated_spans @@ -335,7 +348,7 @@ def _get_teach_task_details(self, teach_task_id: int): for target in target_names: target_name_map[target["name"]] = target["id"] return labelset_id, model_group_id, target_name_map - + def _get_example_list(self, model_group_id: int, limit=1000): examples = self.structure.get_example_ids( model_group_id=model_group_id, limit=limit @@ -346,4 +359,4 @@ def _get_example_list(self, model_group_id: int, limit=1000): for i in 
examples["modelGroup"]["pagedExamples"]["examples"] ] ) - return examples \ No newline at end of file + return examples diff --git a/indico_toolkit/auto_populate/types.py b/indico_toolkit/auto_populate/types.py index fa1299de..dae2460d 100644 --- a/indico_toolkit/auto_populate/types.py +++ b/indico_toolkit/auto_populate/types.py @@ -1,40 +1,46 @@ from dataclasses import dataclass from typing import List + @dataclass class Example: id: int data_file_name: str + class ExampleList: def __init__(self, examples: List[Example]): self.examples = examples - + def get_example(self, example_id: int) -> Example: """ - Returns example with matching example_id. If no matching example id found, return None + Returns example with matching example_id. If no matching example id found, + return None. """ for example in self.examples: if example.id == example_id: return example return None - + def get_example_id(self, example_data_file_name: str) -> int: """ - Returns id for a specific example with the same name as example_data_file_name. If no matching example found, return None - Assumes no duplicate filenames in dataset + Returns id for a specific example with the same name as example_data_file_name. + If no matching example found, return None. 
Assumes no duplicate filenames in + dataset """ for example in self.examples: if example.data_file_name == example_data_file_name: return example.id return None + @dataclass class TokenSpanInput: start: int end: int pageNum: int + @dataclass class SpatialSpanInput: top: int @@ -43,16 +49,18 @@ class SpatialSpanInput: right: int pageNum: int + @dataclass class LabelInst: clsId: int spans: List[TokenSpanInput] = None bounds: List[SpatialSpanInput] = None + @dataclass class LabelInput: exampleId: int targets: List[LabelInst] rejected: bool = None override: bool = None - partial: bool = None \ No newline at end of file + partial: bool = None diff --git a/indico_toolkit/auto_review/__init__.py b/indico_toolkit/auto_review/__init__.py index f2524f90..b7b67e10 100644 --- a/indico_toolkit/auto_review/__init__.py +++ b/indico_toolkit/auto_review/__init__.py @@ -1 +1,6 @@ from .auto_reviewer import AutoReviewer, AutoReviewFunction + +__all__ = ( + "AutoReviewer", + "AutoReviewFunction", +) diff --git a/indico_toolkit/auto_review/auto_review_functions.py b/indico_toolkit/auto_review/auto_review_functions.py index c0a38955..5e2b3028 100644 --- a/indico_toolkit/auto_review/auto_review_functions.py +++ b/indico_toolkit/auto_review/auto_review_functions.py @@ -1,6 +1,5 @@ -from typing import List from collections import defaultdict - +from typing import List ACCEPTED = "accepted" REJECTED = "rejected" diff --git a/indico_toolkit/auto_review/auto_reviewer.py b/indico_toolkit/auto_review/auto_reviewer.py index 13fa067f..6149a82f 100644 --- a/indico_toolkit/auto_review/auto_reviewer.py +++ b/indico_toolkit/auto_review/auto_reviewer.py @@ -1,20 +1,22 @@ -from typing import Dict, List, Callable +from typing import Callable, Dict, List + class AutoReviewFunction: """ - Class for hosting functions to manipulate predictions before sending to + Class for hosting functions to manipulate predictions before sending to auto review Args: - function (Callable): method to be invoked when 
applying reviews. - The Callable must have the following arguments in the following order: - predictions (List[dict]), - labels (List[str]), - **kwargs, - + function (Callable): method to be invoked when applying reviews. + The Callable must have the following arguments in the following order: + predictions (List[dict]), + labels (List[str]), + **kwargs, labels (List[str]): list of labels to invoke method on. Defaults to all labels - kwargs (Dict[str, str]): dictionary containing additional arguments needed in calling function + kwargs (Dict[str, str]): dictionary containing additional arguments needed in + calling function """ + def __init__( self, function: Callable, @@ -31,7 +33,6 @@ def apply(self, predictions: List[dict]): return self.function(predictions, self.labels, **self.kwargs) - class AutoReviewer: """ Class for programatically reviewing workflow predictions @@ -49,7 +50,7 @@ class AutoReviewer: def __init__( self, predictions: List[dict], - functions: List[AutoReviewFunction] = [] + functions: List[AutoReviewFunction] = [], ): self.predictions = predictions self.updated_predictions = predictions @@ -59,5 +60,3 @@ def apply_reviews(self) -> list: for function in self.functions: self.updated_predictions = function.apply(self.updated_predictions) return self.updated_predictions - - diff --git a/indico_toolkit/client.py b/indico_toolkit/client.py index ec449633..1a4c4cd7 100644 --- a/indico_toolkit/client.py +++ b/indico_toolkit/client.py @@ -1,19 +1,22 @@ +from typing import Any + from indico import IndicoClient, IndicoConfig from indico.errors import IndicoAuthenticationFailed, IndicoRequestError -from indico_toolkit.errors import ToolkitAuthError -from indico_toolkit.retry import retry + +from .errors import ToolkitAuthError +from .retry import retry @retry(IndicoRequestError, ConnectionError) def create_client( host: str, - api_token_path: str = None, - api_token_string: str = None, + api_token_path: "str | None" = None, + api_token_string: "str | 
None" = None, verify_ssl: bool = True, - **kwargs, + **kwargs: Any, ) -> IndicoClient: """ - Instantiate your Indico API client. + Instantiate your Indico API client. Specify either the path to your token or the token string itself. """ config = IndicoConfig( @@ -27,5 +30,6 @@ def create_client( return IndicoClient(config) except IndicoAuthenticationFailed as e: raise ToolkitAuthError( - f"{e}\n\n Ensure that you are using your most recently downloaded token with the correct host URL" + f"{e}\nEnsure that you are using your most recently downloaded token with " + "the correct host URL" ) diff --git a/indico_toolkit/errors.py b/indico_toolkit/errors.py index 0eb874a4..68213dcd 100644 --- a/indico_toolkit/errors.py +++ b/indico_toolkit/errors.py @@ -1,26 +1,32 @@ class ToolkitError(Exception): pass + class ToolkitAuthError(ToolkitError): def __init__(self, msg: str): super().__init__(msg) + class ToolkitStatusError(ToolkitError): def __init__(self, msg: str): super().__init__(msg) + class ToolkitInputError(ToolkitError): def __init__(self, msg: str): super().__init__(msg) + class ToolkitInstantiationError(ToolkitError): def __init__(self, msg: str): super().__init__(msg) + class ToolkitPopulationError(ToolkitError): def __init__(self, msg: str): super().__init__(msg) + class ToolkitStaggeredLoopError(ToolkitError): def __init__(self, msg: str): super().__init__(msg) diff --git a/indico_toolkit/indico_wrapper/__init__.py b/indico_toolkit/indico_wrapper/__init__.py index 7c993624..40dd45aa 100644 --- a/indico_toolkit/indico_wrapper/__init__.py +++ b/indico_toolkit/indico_wrapper/__init__.py @@ -1,6 +1,15 @@ -from .indico_wrapper import IndicoWrapper -from .workflow import Workflow from .dataset import Datasets -from .reviewer import Reviewer from .doc_extraction import DocExtraction from .download import Download +from .indico_wrapper import IndicoWrapper +from .reviewer import Reviewer +from .workflow import Workflow + +__all__ = ( + "Datasets", + "DocExtraction", + 
"Download", + "IndicoWrapper", + "Reviewer", + "Workflow", +) diff --git a/indico_toolkit/indico_wrapper/dataset.py b/indico_toolkit/indico_wrapper/dataset.py index 2b357eb6..a954d5ab 100644 --- a/indico_toolkit/indico_wrapper/dataset.py +++ b/indico_toolkit/indico_wrapper/dataset.py @@ -1,15 +1,17 @@ from typing import List + from indico import IndicoClient -from indico.types import Dataset, Workflow, OcrEngine from indico.queries import ( - GetDataset, - CreateDataset, + AddDataToWorkflow, AddFiles, - DeleteDataset, + CreateDataset, CreateEmptyDataset, - AddDataToWorkflow, + DeleteDataset, + GetDataset, ) -from indico_toolkit.indico_wrapper import IndicoWrapper +from indico.types import Dataset, OcrEngine, Workflow + +from .indico_wrapper import IndicoWrapper class Datasets(IndicoWrapper): @@ -24,13 +26,17 @@ def add_files_to_dataset(self, dataset_id: int, filepaths: List[str]) -> Dataset Upload documents to an existing dataset and wait for them to OCR """ dataset = self.client.call( - AddFiles(dataset_id=dataset_id, files=filepaths, autoprocess=True, wait=True) + AddFiles( + dataset_id=dataset_id, files=filepaths, autoprocess=True, wait=True + ) ) return dataset def add_new_files_to_task(self, workflow_id: id, wait: bool = True) -> Workflow: """ - Add newly uploaded documents to an existing teach task given the task's associated workflow ID + Add newly uploaded documents to an existing teach task given the task's + associated workflow ID + Args: workflow_id (id): workflow ID associated with teach task wait (bool, optional): wait for data to be added. Defaults to True. 
@@ -41,17 +47,28 @@ def add_new_files_to_task(self, workflow_id: id, wait: bool = True) -> Workflow: return workflow def create_empty_dataset( - self, dataset_name: str, dataset_type: str = "DOCUMENT", ocr_engine: OcrEngine = OcrEngine.READAPI + self, + dataset_name: str, + dataset_type: str = "DOCUMENT", + ocr_engine: OcrEngine = OcrEngine.READAPI, ) -> Dataset: """ Create an empty dataset Args: name (str): Name of the dataset - dataset_type (str, optional): TEXT, IMAGE, or DOCUMENT. Defaults to "DOCUMENT". + dataset_type (str, optional): TEXT, IMAGE, or DOCUMENT. + Defaults to "DOCUMENT". """ - return self.client.call(CreateEmptyDataset(dataset_name, dataset_type, ocr_engine)) + return self.client.call( + CreateEmptyDataset(dataset_name, dataset_type, ocr_engine) + ) - def create_dataset(self, filepaths: List[str], dataset_name: str, ocr_engine: OcrEngine = OcrEngine.READAPI) -> Dataset: + def create_dataset( + self, + filepaths: List[str], + dataset_name: str, + ocr_engine: OcrEngine = OcrEngine.READAPI, + ) -> Dataset: dataset = self.client.call( CreateDataset( name=dataset_name, @@ -61,7 +78,7 @@ def create_dataset(self, filepaths: List[str], dataset_name: str, ocr_engine: Oc ) self.dataset_id = dataset.id return dataset - + def delete_dataset(self, dataset_id: int) -> bool: """ Returns True if operation is succesful @@ -70,7 +87,8 @@ def delete_dataset(self, dataset_id: int) -> bool: def get_dataset_metadata(self, dataset_id: int) -> List[dict]: """ - Get list of dataset files with information like file name, status, and number of pages + Get list of dataset files with information like file name, status, and number of + pages """ query = """ query GetDataset($id: Int) { diff --git a/indico_toolkit/indico_wrapper/doc_extraction.py b/indico_toolkit/indico_wrapper/doc_extraction.py index 82fc0691..3a10170d 100644 --- a/indico_toolkit/indico_wrapper/doc_extraction.py +++ b/indico_toolkit/indico_wrapper/doc_extraction.py @@ -1,8 +1,10 @@ from typing import List, 
Union + from indico import IndicoClient from indico.queries import DocumentExtraction, Job -from indico_toolkit.indico_wrapper import IndicoWrapper -from indico_toolkit.ocr import OnDoc, StandardOcr, CustomOcr + +from ..ocr import CustomOcr, OnDoc, StandardOcr +from .indico_wrapper import IndicoWrapper class DocExtraction(IndicoWrapper): @@ -18,7 +20,8 @@ def __init__( ): """ Args: - preset_config (str): Options are simple, legacy, detailed, ondocument, and standard. + preset_config (str): Options are simple, legacy, detailed, ondocument, and + standard. """ self._preset_config = preset_config self.client = client @@ -31,11 +34,13 @@ def run_ocr( ) -> List[Union[StandardOcr, OnDoc, CustomOcr, str]]: """ Args: - filepaths (List[str]): List of paths to local documents you would like to submit for extraction + filepaths (List[str]): List of paths to local documents you would like to + submit for extraction text_setting (str): Options are full_text and page_texts. Returns: - extracted_data (List[Union[StandardOcr, OnDoc, CustomOcr, str]]): data from DocumentExtraction converted to OCR objects or string text + extracted_data (List[Union[StandardOcr, OnDoc, CustomOcr, str]]): data from + DocumentExtraction converted to OCR objects or string text """ jobs = self._submit_to_ocr(filepaths) extracted_data = [] @@ -50,7 +55,9 @@ def run_ocr( else: extracted_data.append(self._convert_ocr_objects(result)) else: - raise Exception(f"{filepaths[ind]} {status.status}: {status.result}.") + raise RuntimeError( + f"{filepaths[ind]} {status.status}: {status.result}." 
+ ) return extracted_data def _submit_to_ocr(self, filepaths: List[str]) -> List[Job]: @@ -63,7 +70,10 @@ def _convert_ocr_objects( ) -> Union[StandardOcr, OnDoc, CustomOcr]: if self.json_config == {"preset_config": "ondocument"}: return OnDoc(extracted_data) - elif self.json_config == {"preset_config": "standard"} or self.json_config is None: + elif ( + self.json_config == {"preset_config": "standard"} + or self.json_config is None + ): return StandardOcr(extracted_data) else: return CustomOcr(extracted_data) diff --git a/indico_toolkit/indico_wrapper/download.py b/indico_toolkit/indico_wrapper/download.py index 2e5d2759..57be223a 100644 --- a/indico_toolkit/indico_wrapper/download.py +++ b/indico_toolkit/indico_wrapper/download.py @@ -1,17 +1,25 @@ import os from io import StringIO -import pandas as pd -from indico.types.export import Export + from indico import IndicoClient, IndicoRequestError -from indico_toolkit import ToolkitInputError -from indico_toolkit.retry import retry from indico.queries import ( - RetrieveStorageObject, - DownloadExport, CreateExport, + DownloadExport, GraphQLRequest, + RetrieveStorageObject, ) -import tqdm +from indico.types.export import Export + +from ..errors import ToolkitInputError +from ..retry import retry + +try: + import pandas as pd + + _PANDAS_INSTALLED = True +except ImportError as error: + _PANDAS_INSTALLED = False + _IMPORT_ERROR = error class Download: @@ -35,7 +43,8 @@ def get_dataset_pdfs( dataset_id (int): Dataset ID to download from labelset_id (int): ID of your labelset (from teach task) output_dir (str): Path to directory to write PDFs - max_files_to_download (int): = Max number of files to download (default: None = download all) + max_files_to_download (int): = Max number of files to download + (default: None = download all) Raises: ToolkitInputError: Exception if invalid directory path @@ -56,7 +65,7 @@ def get_dataset_pdfs( ) return num_files_downloaded - def get_uploaded_csv_dataframe(self, dataset_id: int) 
-> pd.DataFrame: + def get_uploaded_csv_dataframe(self, dataset_id: int) -> "pd.DataFrame": """ Get a dataframe from a CSV that has been uploaded to the platform Args: @@ -64,20 +73,27 @@ def get_uploaded_csv_dataframe(self, dataset_id: int) -> pd.DataFrame: Returns: pd.DataFrame: a dataframe representation of the CSV you uploaded """ + if not _PANDAS_INSTALLED: + raise RuntimeError( + "getting an uploaded CSV dataframe requires additional dependencies: " + "`pip install indico-toolkit[downloads]`" + ) from _IMPORT_ERROR + url = self._get_csv_download_url(dataset_id) string_df = self._retrieve_storage_object(url) return pd.read_csv(StringIO(string_df)) def get_snapshot_dataframe( self, dataset_id: int, labelset_id: int, file_info: bool = True, **kwargs - ) -> pd.DataFrame: + ) -> "pd.DataFrame": """Download a snapshot. For additional arguments, see documentation for CreateExport in the Python SDK. Args: dataset_id (int): dataset ID you're interested in labelset_id (int): ID of your labelset (from teach task) - file_info (bool, optional): Include additional file level metadata. Defaults to True. + file_info (bool, optional): Include additional file level metadata. + Defaults to True. 
Returns: pd.DataFrame: DataFrame with full document text and additional metadata @@ -89,12 +105,20 @@ def get_snapshot_dataframe( def _download_pdfs_from_export( self, - export_df: pd.DataFrame, + export_df: "pd.DataFrame", output_dir: str, file_name_col: str, file_url_col: str, max_files_to_download: int = None, ) -> int: + try: + import tqdm + except ImportError as error: + raise RuntimeError( + "downloading pdfs requires additional dependencies: " + "`pip install indico-toolkit[downloads]`" + ) from error + for i, row in tqdm.tqdm(export_df.iterrows()): basename = os.path.basename(row[file_name_col]) pdf_bytes = self._retrieve_storage_object(row[file_url_col]) @@ -105,7 +129,7 @@ def _download_pdfs_from_export( return export_df.shape[0] @retry(IndicoRequestError, ConnectionError) - def _download_export(self, export_id: int) -> pd.DataFrame: + def _download_export(self, export_id: int) -> "pd.DataFrame": """ Download a dataframe representation of your dataset export """ @@ -127,11 +151,13 @@ def _create_export( Args: dataset_id (int): ID of your dataset labelset_id (int): ID of your labelset (from teach task) - file_info (bool, optional): whether to include additional file metadata. Defaults to True. + file_info (bool, optional): whether to include additional file metadata. + Defaults to True. wait (bool, optional): wait for export to be created. Defaults to True. Returns: - Export: Description of dataset assets. See Python SDK for full object description + Export: Description of dataset assets. 
See Python SDK for full object + description """ return self.client.call( CreateExport( @@ -160,7 +186,8 @@ def _get_csv_download_url(self, dataset_id: int) -> str: } """ result = self.client.call(GraphQLRequest(query, {"id": dataset_id})) - for file in result["dataset"]["files"]: # loop through in case there are other file types uploaded to dataset + # loop through in case there are other file types uploaded to dataset + for file in result["dataset"]["files"]: if file["fileType"] == "CSV": return file["rainbowUrl"] raise ToolkitInputError(f"There are no CSVs uploaded to {dataset_id}") diff --git a/indico_toolkit/indico_wrapper/indico_wrapper.py b/indico_toolkit/indico_wrapper/indico_wrapper.py index b6b46206..6b1c5d4a 100644 --- a/indico_toolkit/indico_wrapper/indico_wrapper.py +++ b/indico_toolkit/indico_wrapper/indico_wrapper.py @@ -1,19 +1,20 @@ from typing import List, Union + +from indico import IndicoClient +from indico.errors import IndicoRequestError from indico.queries import ( - RetrieveStorageObject, + AddModelGroupComponent, + CreateStorageURLs, GraphQLRequest, JobStatus, - CreateModelGroup, ModelGroupPredict, - CreateStorageURLs + RetrieveStorageObject, ) from indico.types import Dataset, ModelGroup, Workflow -from indico import IndicoClient -from indico.errors import IndicoRequestError -from indico_toolkit import ToolkitInputError -from indico_toolkit.retry import retry -from indico_toolkit.types import Predictions +from ..errors import ToolkitStatusError +from ..retry import retry +from ..types import Predictions class IndicoWrapper: @@ -38,19 +39,21 @@ def train_model( source_col: str, target_col: str, after_component_id: int = None, - wait: bool = False, ) -> ModelGroup: """ - Train an Indico model + Train an Indico model Args: - dataset (Dataset): A dataset object (should represent an uploaded CSV dataset) + dataset (Dataset): A dataset object (should represent an uploaded CSV + dataset) workflow (Workflow): A workflow object to the corresponding 
dataset model_name (str): the name for your model source_col (str): the csv column that contained the text target_col (str): the csv column that contained the labels - after_component_id (int, optional): The workflow component that precedes this model group. If None, will be set - programmatically to the id of the Input OCR Extraction component. Defaults to None. - wait (bool, optional): Wait for the model to finish training. Defaults to False. + after_component_id (int, optional): The workflow component that precedes + this model group. If None, will be set programmatically to the id of the + Input OCR Extraction component. Defaults to None. + wait (bool, optional): Wait for the model to finish training. + Defaults to False. Returns: ModelGroup: Model group object @@ -58,14 +61,13 @@ def train_model( if not after_component_id: after_component_id = workflow.component_by_type("INPUT_OCR_EXTRACTION").id return self.client.call( - CreateModelGroup( + AddModelGroupComponent( name=model_name, dataset_id=dataset.id, source_column_id=dataset.datacolumn_by_name(source_col).id, - labelset_id=dataset.labelset_by_name(target_col).id, + labelset_column_id=dataset.labelset_by_name(target_col).id, workflow_id=workflow.id, after_component_id=after_component_id, - wait=wait, ) ) @@ -102,11 +104,12 @@ def get_predictions_with_model_id( options (dict, optional): Model Prediction options. Defaults to None. wait (bool, optional): Wait for predictions to finish. Defaults to True. - Returns: if wait is False, returns the job ID, else returns a list of Predictions where each - Predictions is either type Classifications or Extractions depending on your model. + Returns: if wait is False, returns the job ID, else returns a list of + Predictions where each Predictions is either type Classifications or + Extractions depending on your model. 
""" job = self.client.call(ModelGroupPredict(model_id, samples, load, options)) - if wait == False: + if not wait: return job.id status = self.get_job_status(job.id, wait=True) if status.status != "SUCCESS": @@ -114,4 +117,3 @@ def get_predictions_with_model_id( f"Predictions Failed, {status.status}: {status.result}" ) return [Predictions.get_obj(i) for i in status.result] - diff --git a/indico_toolkit/indico_wrapper/modelop.py b/indico_toolkit/indico_wrapper/modelop.py index 4db961f0..5fe3045e 100644 --- a/indico_toolkit/indico_wrapper/modelop.py +++ b/indico_toolkit/indico_wrapper/modelop.py @@ -1,9 +1,9 @@ -from indico import IndicoClient -from indico.queries import GraphQLRequest import json -import string from typing import Iterator +from indico import IndicoClient +from indico.queries import GraphQLRequest + # valid model option parameters TEXT_EXTRACTION_PARAMS = { "max_empty_chunk_ratio": lambda value: 0 <= value <= 1.0e5, @@ -48,7 +48,8 @@ def get_model_options( or the most recent model if `model_id` is not specified. 
Args: model_group_id (int): id of model group - model_id (int, optional): argument to return a specific model within a model group + model_id (int, optional): argument to return a specific model within a model + group """ all_model_options = self.get_all_model_options(model_group_id) @@ -130,24 +131,31 @@ def update_model_settings( model_type (str): type of model ("text_extraction", "text_classification") **kwargs: Advanced Model Training Options - Default values of Advanced Model Training Options and the avaiable parameters: + Default values of Advanced Model Training Options and avaiable parameters: For Text Extraction Model: - max_empty_chunk_ratio : 1.0 (min of 0, no max value: a large number effectivly turns this option off) + max_empty_chunk_ratio : 1.0 (min of 0, no max value: a large + number effectivly turns this option off) auto_negative_scaling : True - optimize_for : "predict_speed" ( "predict_speed", "accuracy", "speed", "accuracy_fp16" and "predict_speed_fp16") + optimize_for : "predict_speed" ( "predict_speed", "accuracy", + "speed", "accuracy_fp16" and "predict_speed_fp16") subtoken_predictions : True - base_model : "roberta" ("roberta", "small" (distilled version of RoBERTa), "multilingual", "fast", "textcnn", "fasttextcnn") + base_model : "roberta" ("roberta", "small" (distilled version of + RoBERTa), "multilingual", "fast", "textcnn", "fasttextcnn") class_weight : "sqrt" ("linear", "sqrt", "log", None) For Text Classification Model: - model_type : "standard" (“tfidf_lr”, “tfidf_gbt”, “standard”, “finetune”) + model_type : "standard" ("tfidf_lr", "tfidf_gbt", "standard", + "finetune") Returns: dict: Dictionary of advanced model training options """ model = self.client.call( GraphQLRequest( """ - mutation updateModelGroup($modelGroupId: Int!, $modelTrainingOptions: JSONString) { + mutation updateModelGroup( + $modelGroupId: Int!, + $modelTrainingOptions: JSONString + ) { updateModelGroupSettings( modelGroupId: $modelGroupId modelTrainingOptions: 
$modelTrainingOptions diff --git a/indico_toolkit/indico_wrapper/reviewer.py b/indico_toolkit/indico_wrapper/reviewer.py index a41b8891..faf6e9a4 100644 --- a/indico_toolkit/indico_wrapper/reviewer.py +++ b/indico_toolkit/indico_wrapper/reviewer.py @@ -1,7 +1,8 @@ import json -import time + from indico import IndicoClient -from indico_toolkit.indico_wrapper import Workflow + +from .indico_wrapper import Workflow class Reviewer(Workflow): @@ -22,7 +23,8 @@ def accept_review(self, submission_id: int, changes: dict) -> None: Accept a submission in the review queue Args: submission_id (int): submission ID - changes (dict): accepted predictions with format like, e.g. {"model_name": [{"label"...}]} + changes (dict): accepted predictions with format like, + e.g. {"model_name": [{"label"...}]} """ self.graphQL_request( SUBMIT_REVIEW, @@ -39,8 +41,8 @@ def get_random_review_id(self): ) try: return response["randomSubmission"]["id"] - except: - raise Exception("The review queue is empty") + except Exception: + raise RuntimeError("The review queue is empty") def get_random_exception_id(self): response = self.graphQL_request( @@ -48,17 +50,28 @@ def get_random_exception_id(self): ) try: return response["randomSubmission"]["id"] - except: - raise Exception("The exception queue is empty") + except Exception: + raise RuntimeError("The exception queue is empty") def reject_submission(self, submission_id): return self.graphQL_request( SUBMIT_REVIEW, {"rejected": True, "submissionId": submission_id} ) + SUBMIT_REVIEW = """ -mutation submitStandardQueue($changes: JSONString, $rejected: Boolean, $submissionId: Int!, $notes: String) { - submitReview(changes: $changes, rejected: $rejected, submissionId: $submissionId, notes: $notes) { +mutation submitStandardQueue( + $changes: JSONString, + $rejected: Boolean, + $submissionId: Int!, + $notes: String +) { + submitReview( + changes: $changes, + rejected: $rejected, + submissionId: $submissionId, + notes: $notes +) { id __typename } diff 
--git a/indico_toolkit/indico_wrapper/workflow.py b/indico_toolkit/indico_wrapper/workflow.py index 1534827e..8cdb0fad 100644 --- a/indico_toolkit/indico_wrapper/workflow.py +++ b/indico_toolkit/indico_wrapper/workflow.py @@ -1,28 +1,28 @@ -import time import io from os import PathLike -from typing import List, Union, Dict -from indico import IndicoClient, IndicoRequestError +from typing import Dict, List, Union + +from indico import IndicoClient from indico.queries import ( - Submission, - SubmissionFilter, - ListSubmissions, - UpdateSubmission, GetSubmission, GetWorkflow, - WorkflowSubmission, + JobStatus, + ListSubmissions, + Submission, + SubmissionFilter, SubmitReview, - WaitForSubmissions, + UpdateSubmission, UpdateWorkflowSettings, - JobStatus, + WaitForSubmissions, + WorkflowSubmission, ) -from indico.types import Workflow from indico.queries.submission import SubmissionResult -from .indico_wrapper import IndicoWrapper -from indico_toolkit import ToolkitStatusError -from indico_toolkit.ocr import OnDoc -from indico_toolkit.types import WorkflowResult +from indico.types import Workflow +from ..errors import ToolkitStatusError +from ..ocr import OnDoc +from ..types import WorkflowResult +from .indico_wrapper import IndicoWrapper COMPLETE_FILTER = SubmissionFilter(status="COMPLETE", retrieved=False) PENDING_REVIEW_FILTER = SubmissionFilter(status="PENDING_REVIEW", retrieved=False) @@ -54,15 +54,13 @@ def submit_documents_to_workflow( Args: workflow_id (int): Workflow to submit to pdf_filepaths (List[str]): Path to local documents you would like to submit - streams (Dict[str, io.BufferedIOBase]): List of filename keys mapped to streams - for upload. + streams (Dict[str, io.BufferedIOBase]): List of filename keys mapped to + streams for upload. Returns: List[int]: List of unique and persistent identifier for each submission. 
""" return self.client.call( - WorkflowSubmission( - workflow_id=workflow_id, files=files, streams=streams - ) + WorkflowSubmission(workflow_id=workflow_id, files=files, streams=streams) ) def get_ondoc_ocr_from_etl_url(self, etl_url: str) -> OnDoc: @@ -125,14 +123,16 @@ def get_submission_results_from_ids( ignore_deleted_submissions: bool = False, ) -> List[WorkflowResult]: """ - Wait for submission to pass through workflow models and get result. If Review is enabled, - result may be retrieved prior to human review. + Wait for submission to pass through workflow models and get result. If Review is + enabled, result may be retrieved prior to human review. Args: submission_ids (List[int]): Ids of submission predictions to retrieve timeout (int): seconds permitted for each submission prior to timing out - return_raw_json: (bool) = If True return raw json result, otherwise return WorkflowResult object. - raise_exception_for_failed (bool): if True, ToolkitStatusError raised for failed submissions + return_raw_json: (bool) = If True return raw json result, otherwise return + WorkflowResult object. 
+ raise_exception_for_failed (bool): if True, ToolkitStatusError raised for + failed submissions return_failed_results (bool): if True, return objects for failed submissions ignore_deleted_submissions (bool): if True, ignore deleted submissions @@ -205,8 +205,8 @@ def wait_for_submissions_to_process( self, submission_ids: List[int], timeout_seconds: int = 180 ) -> None: """ - Wait for submissions to reach a terminal status of "COMPLETE", "PENDING_AUTO_REVIEW", - "FAILED", or "PENDING_REVIEW" + Wait for submissions to reach a terminal status of "COMPLETE", + "PENDING_AUTO_REVIEW", "FAILED", or "PENDING_REVIEW" """ self.client.call(WaitForSubmissions(submission_ids, timeout_seconds)) @@ -228,4 +228,4 @@ def _error_handle(self, message: str, ignore_exceptions: bool): if ignore_exceptions: print(f"Ignoring exception and continuing: {message}") else: - raise Exception(message) + raise RuntimeError(message) diff --git a/indico_toolkit/metrics/__init__.py b/indico_toolkit/metrics/__init__.py index 1f972572..94987923 100644 --- a/indico_toolkit/metrics/__init__.py +++ b/indico_toolkit/metrics/__init__.py @@ -1,4 +1,11 @@ -from .metrics import ExtractionMetrics +from .compare_ground_truth import CompareGroundTruth from .compare_models import CompareModels +from .metrics import ExtractionMetrics from .plotting import Plotting -from .compare_ground_truth import CompareGroundTruth + +__all__ = ( + "CompareGroundTruth", + "CompareModels", + "ExtractionMetrics", + "Plotting", +) diff --git a/indico_toolkit/metrics/compare_ground_truth.py b/indico_toolkit/metrics/compare_ground_truth.py index 4058f7b9..25948fdd 100644 --- a/indico_toolkit/metrics/compare_ground_truth.py +++ b/indico_toolkit/metrics/compare_ground_truth.py @@ -1,12 +1,14 @@ from typing import List -from indico_toolkit.association.association import sequences_overlap, sequences_exact -from indico_toolkit.types.extractions import Extractions -from indico_toolkit.errors import ToolkitInputError + +from 
..association.association import sequences_exact, sequences_overlap +from ..errors import ToolkitInputError +from ..types.extractions import Extractions class CompareGroundTruth: """ - Compare a set of ground truths against a set of model predictions on a per document basis. + Compare a set of ground truths against a set of model predictions on a per document + basis. """ def __init__(self, ground_truth: List[dict], predictions: List[dict]): @@ -20,7 +22,8 @@ def __init__(self, ground_truth: List[dict], predictions: List[dict]): def set_all_label_metrics(self, span_type: str = "overlap") -> None: """ - The "all_label_metrics" dict includes each label as a key and each label's metrics as the corresponding value. + The "all_label_metrics" dict includes each label as a key and each label's + metrics as the corresponding value. """ if span_type in ["exact", "overlap"]: self.all_label_metrics = { @@ -31,7 +34,8 @@ def set_all_label_metrics(self, span_type: str = "overlap") -> None: def set_overall_metrics(self) -> None: """ - The "overall_metrics" dict includes the metrics for the entire document. (Key: metric type; value: metric value) + The "overall_metrics" dict includes the metrics for the entire document. + (Key: metric type; value: metric value) """ if self.all_label_metrics is None: self.set_all_label_metrics() @@ -81,16 +85,17 @@ def _get_base_metrics(self, label: str, span_type: str) -> dict: each overlap with a single ground truth, each pred is counted as a true positive. (i.e. There isn't a break out of the loop once a TP is found.) 
""" - # TODO potentially build in choice on the "multiple true positives" per ground truth prediction by adding conditional + # TODO potentially build in choice on the "multiple true positives" per ground + # truth prediction by adding conditional true_pos = 0 false_neg = 0 false_pos = 0 span_types = {"overlap": sequences_overlap, "exact": sequences_exact} span_type_func = span_types[span_type] - if not label in self.preds_by_label: + if label not in self.preds_by_label: false_neg = len(self.gt_by_label[label]) - elif not label in self.gt_by_label: + elif label not in self.gt_by_label: false_pos = len(self.preds_by_label[label]) else: for model_pred in self.preds_by_label[label]: diff --git a/indico_toolkit/metrics/compare_models.py b/indico_toolkit/metrics/compare_models.py index 6f534fc8..b72ab210 100644 --- a/indico_toolkit/metrics/compare_models.py +++ b/indico_toolkit/metrics/compare_models.py @@ -1,13 +1,19 @@ -from typing import List, Tuple, Set from collections import namedtuple -from indico import IndicoClient -import pandas as pd +from typing import List, Set, Tuple +from indico import IndicoClient -from .plotting import Plotting +from ..errors import ToolkitInputError from .metrics import ExtractionMetrics -from indico_toolkit import ToolkitInputError +from .plotting import Plotting +try: + import pandas as pd + + _PANDAS_INSTALLED = True +except ImportError as error: + _PANDAS_INSTALLED = False + _IMPORT_ERROR = error ModelIds = namedtuple("ModelIds", "group_id id") @@ -31,6 +37,12 @@ def __init__( model_group_2 (int): model group of second model model_id_2 (int): id of second model """ + if not _PANDAS_INSTALLED: + raise RuntimeError( + "comparing metrics requires additional dependencies: " + "`pip install indico-toolkit[metrics]`" + ) from _IMPORT_ERROR + self.client = client self.models = [ ModelIds(model_group_1, model_id_1), @@ -40,7 +52,7 @@ def __init__( self.overlapping_fields: Set[str] = None self.df: pd.DataFrame = None - def get_data(self, 
span_type: str = "overlap") -> pd.DataFrame: + def get_data(self, span_type: str = "overlap") -> "pd.DataFrame": """ Gathers metrics for both models into a dataframe, setting it to self.df Args: @@ -48,14 +60,15 @@ def get_data(self, span_type: str = "overlap") -> pd.DataFrame: """ dfs = [] for model in self.models: - metrics = self.get_metrics(model.group_id) + self.get_metrics(model.group_id) df = self.get_metrics_df(span_type, model.id) df.drop("model_id", axis=1, inplace=True) dfs.append(df) self._set_labelset_info(dfs) if len(self.overlapping_fields) == 0: raise ToolkitInputError( - f"There are no shared labels between the models you provided: {self.non_overlapping_fields}" + "There are no shared labels between the models you provided: " + f"{self.non_overlapping_fields}" ) self.df = pd.merge( dfs[0], dfs[1], on="field_name", suffixes=self._model_suffixes @@ -65,11 +78,15 @@ def get_metric_differences( self, metric: str = "f1Score", include_difference: bool = True ): """ - Get a dataframe focused on one metrics, by default sorted by a column of value differences + Get a dataframe focused on one metrics, by default sorted by a column of value + differences + Args: - metric (str, optional): possible values are 'precision', 'recall', 'f1Score', 'falsePositives', - 'falseNegatives', 'truePositives'. Defaults to "f1Score". - include_difference (bool): include a column of the most recent model ID minus the older model ID + metric (str, optional): possible values are 'precision', 'recall', + 'f1Score', 'falsePositives', 'falseNegatives', 'truePositives'. + Defaults to "f1Score". + include_difference (bool): include a column of the most recent model ID + minus the older model ID """ metric_cols = self._get_metric_col_names(metric) cols_to_keep = ["field_name", *metric_cols] @@ -87,17 +104,19 @@ def bar_plot( bar_colors: List[str] = ["salmon", "darkblue"], ): """ - Write an html bar plot to disc. 
Will also open the plot automatically in your browser, where - you will interactive functionality and the ability to download a copy as a PNG as well. + Write an html bar plot to disc. Will also open the plot automatically in your + browser, where you will interactive functionality and the ability to download a + copy as a PNG as well. Args: output_path (str): where you want to write plot, e.g. "./myplot.html" - metric (str, optional): possible values are 'precision', 'recall', 'f1Score', 'falsePositives', - 'falseNegatives', 'truePositives'. Defaults to "f1Score". + metric (str, optional): possible values are 'precision', 'recall', + 'f1Score', 'falsePositives', 'falseNegatives', 'truePositives'. + Defaults to "f1Score". plot_title (str, optional): Title of the plot. Defaults to "". - bar_colors (List[str], optional): length two list with the colors for your plot, can be - css color names, rgb, or hex name . - Defaults to ["#EEE8AA", "#98FB98"]. + bar_colors (List[str], optional): length two list with the colors for your + plot, can be css color names, rgb, or hex name . + Defaults to ["#EEE8AA", "#98FB98"]. """ metric_cols = self._get_metric_col_names(metric, order_descending=False) plotting = Plotting() @@ -122,7 +141,7 @@ def to_csv( Args: output_path (str): path to write CSV on your system, e.g. 
"./my_metrics.csv" """ - df.to_csv(output_path, index=False) + self.df.to_csv(output_path, index=False) def get_data_df(self): raise NotImplementedError( @@ -153,7 +172,7 @@ def labelset_differences(labelset1: set, labelset2: set): def _model_suffixes(self): return f"_{self.models[0].id}", f"_{self.models[1].id}" - def _set_labelset_info(self, df_list: List[pd.DataFrame]) -> Tuple[set]: + def _set_labelset_info(self, df_list: "List[pd.DataFrame]") -> Tuple[set]: labelsets = [set(i["field_name"]) for i in df_list] self.overlapping_fields = self.labelsets_overlap(labelsets[0], labelsets[1]) self.non_overlapping_fields = self.labelset_differences( diff --git a/indico_toolkit/metrics/metrics.py b/indico_toolkit/metrics/metrics.py index 034b1531..dfae1c7b 100644 --- a/indico_toolkit/metrics/metrics.py +++ b/indico_toolkit/metrics/metrics.py @@ -1,12 +1,20 @@ import json -import pandas as pd -from typing import List, Dict +from typing import Dict, List + from indico import IndicoClient -from indico_toolkit.indico_wrapper import IndicoWrapper -from indico_toolkit import ToolkitInputError +from ..errors import ToolkitInputError +from ..indico_wrapper import IndicoWrapper from .plotting import Plotting +try: + import pandas as pd + + _PANDAS_INSTALLED = True +except ImportError as error: + _PANDAS_INSTALLED = False + _IMPORT_ERROR = error + class ExtractionMetrics(IndicoWrapper): """ @@ -23,12 +31,13 @@ class ExtractionMetrics(IndicoWrapper): df = metrics.get_metrics_df(span_type="exact", select_model_id=102) print(df.head()) - # write the results to a CSV (can also optionally pass span_type/model ID here as well) - metrics.to_csv("./my_metrics.pdf") + # write the results to a CSV (can also optionally pass span_type/model ID here as + well) metrics.to_csv("./my_metrics.pdf") # get an interactive bar plot to visualize model improvement over time metrics.bar_plot("./my_bar_plot.html") """ + def __init__(self, client: IndicoClient): self.client = client self.raw_metrics: 
List[dict] = None @@ -55,13 +64,19 @@ def get_metrics(self, model_group_id: int): def get_metrics_df( self, span_type: str = "overlap", select_model_id: int = None - ) -> pd.DataFrame: + ) -> "pd.DataFrame": """ Get a dataframe of model metrics for a particular span type Args: span_type (str): options include 'superset', 'exact', 'overlap' or 'token' select_model_id (int): only return metrics for a particular model """ + if not _PANDAS_INSTALLED: + raise RuntimeError( + "getting a metrics datafram requires additional dependencies: " + "`pip install indico-toolkit[metrics]`" + ) from _IMPORT_ERROR + if select_model_id: if select_model_id not in self.included_models: raise ToolkitInputError( @@ -93,15 +108,16 @@ def bar_plot( ids_to_exclude: List[int] = [], ): """ - Write an html bar plot to disc to compare model IDs within a model group. - Will also open the plot automatically in your browser, where you will interactive + Write an html bar plot to disc to compare model IDs within a model group. Will + also open the plot automatically in your browser, where you will interactive functionality and the ability to download a copy as a PNG as well. Args: output_path (str): where you want to write plot, e.g. "./myplot.html" span_type (str): options include 'superset', 'exact', 'overlap' or 'token' - metric (str, optional): possible values are 'precision', 'recall', 'f1Score', 'falsePositives', - 'falseNegatives', 'truePositives'. Defaults to "f1Score". + metric (str, optional): possible values are 'precision', 'recall', + 'f1Score', 'falsePositives', 'falseNegatives', 'truePositives'. + Defaults to "f1Score". plot_title (str, optional): Title of the plot. Defaults to "". ids_to_exclude (List[int], optional): Model Ids to exclude from plot. 
""" @@ -109,8 +125,14 @@ def bar_plot( if ids_to_exclude: df = df.drop(df.loc[df["model_id"].isin(ids_to_exclude)].index) model_ids = sorted(list(df["model_id"].unique())) - field_order = df.loc[df["model_id"] == model_ids[-1]].sort_values(by=metric)["field_name"].tolist() - df["field_name"] = df["field_name"].astype("category").cat.set_categories(field_order) + field_order = ( + df.loc[df["model_id"] == model_ids[-1]] + .sort_values(by=metric)["field_name"] + .tolist() + ) + df["field_name"] = ( + df["field_name"].astype("category").cat.set_categories(field_order) + ) plotting = Plotting() for model_id in model_ids: sub_df = df.loc[df["model_id"] == model_id].copy() @@ -136,16 +158,18 @@ def line_plot( fields_to_exclude: List[str] = [], ): """ - Write an html line plot to disc with # of samples on x-axis, a metric on the y-axis and - each line representing a distinct field. - Will also open the plot automatically in your browser, where you will interactive - functionality and the ability to download a copy as a PNG as well. + Write an html line plot to disc with # of samples on x-axis, a metric on the + y-axis and each line representing a distinct field. + + Will also open the plot automatically in your browser, where you will + interactive functionality and the ability to download a copy as a PNG as well. Args: output_path (str): where you want to write plot, e.g. "./myplot.html" span_type (str): options include 'superset', 'exact', 'overlap' or 'token' - metric (str, optional): possible values are 'precision', 'recall', 'f1Score', 'falsePositives', - 'falseNegatives', 'truePositives'. Defaults to "f1Score". + metric (str, optional): possible values are 'precision', 'recall', + 'f1Score', 'falsePositives', 'falseNegatives', 'truePositives'. + Defaults to "f1Score". plot_title (str, optional): Title of the plot. Defaults to "". ids_to_exclude (List[int], optional): Model Ids to exclude from plot. 
fields_to_exclude (List[str], optional): Field Names to exclude from plot. @@ -189,21 +213,26 @@ def to_csv( df.to_csv(output_path, index=False) - class UnbundlingMetrics(ExtractionMetrics): """ Example Usage: um = UnbundlingMetrics(client) um.get_metrics(1232) - um.line_plot("./my_metric_plot.html", metric="recall", title="Insurance Model Recall Improvement") - + um.line_plot( + "./my_metric_plot.html", + metric="recall", + title="Insurance Model Recall Improvement", + ) """ + def get_metrics(self, model_group_id: int): """ Collect all metrics available based on a Model Group ID for an Unbundling model + Args: - model_group_id (int): Model Group ID that you're interestd in (available within the Explain UI) + model_group_id (int): Model Group ID that you're interested in (available + within the Explain UI) """ results = self.graphQL_request(METRIC_QUERY, {"modelGroupId": model_group_id}) if len(results["modelGroups"]["modelGroups"]) == 0: @@ -216,18 +245,29 @@ def get_metrics(self, model_group_id: int): labeled_samples = [] for r in results: model_info = json.loads(r["modelInfo"]) - if "total_number_of_examples" not in model_info or "metrics" not in model_info: - # some dictionaries don't come back with required fields... + if ( + "total_number_of_examples" not in model_info + or "metrics" not in model_info + ): + # some dictionaries don't come back with required fields... 
continue labeled_samples.append(model_info["total_number_of_examples"]) included_models.append(r["id"]) raw_metrics.append(model_info["metrics"]["per_class_metrics"]) self.raw_metrics = raw_metrics self.included_models = included_models - self.number_of_samples = {model_id:samples for model_id, samples in zip(included_models, labeled_samples)} + self.number_of_samples = { + model_id: samples + for model_id, samples in zip(included_models, labeled_samples) + } + def get_metrics_df(self) -> "pd.DataFrame": + if not _PANDAS_INSTALLED: + raise RuntimeError( + "getting a metrics dataframe requires additional dependencies: " + "`pip install indico-toolkit[metrics]`" + ) from _IMPORT_ERROR - def get_metrics_df(self) -> pd.DataFrame: cleaned_metrics = [] for model_id, metrics in zip(self.included_models, self.raw_metrics): for class_name in metrics: @@ -239,7 +279,6 @@ def get_metrics_df(self) -> pd.DataFrame: df = pd.DataFrame(cleaned_metrics) return df.sort_values(by=["field_name", "model_id"], ascending=False) - def line_plot( self, output_path: str, @@ -249,16 +288,18 @@ def line_plot( fields_to_exclude: List[str] = [], ): """ - Write an html line plot to disc with # of samples on x-axis, a metric on the y-axis and - each line representing a distinct field. - Will also open the plot automatically in your browser, where you will interactive - functionality and the ability to download a copy as a PNG as well. + Write an html line plot to disc with # of samples on x-axis, a metric on the + y-axis and each line representing a distinct field. + + Will also open the plot automatically in your browser, where you will + interactive functionality and the ability to download a copy as a PNG as well. Args: output_path (str): where you want to write plot, e.g. 
"./myplot.html" span_type (str): options include 'superset', 'exact', 'overlap' or 'token' - metric (str, optional): possible values are 'precision', 'recall', 'f1_score', 'false_positives', - 'false_negatives', 'true_positives'. Defaults to "f1_score". + metric (str, optional): possible values are 'precision', 'recall', + 'f1_score', 'false_positives', 'false_negatives', 'true_positives'. + Defaults to "f1_score". plot_title (str, optional): Title of the plot. Defaults to "". ids_to_exclude (List[int], optional): Model Ids to exclude from plot. fields_to_exclude (List[str], optional): Field Names to exclude from plot. @@ -289,7 +330,9 @@ def line_plot( plotting.plot(output_path) def bar_plot(self): - raise NotImplementedError("Bar Plot is not currently implemented for unbundling") + raise NotImplementedError( + "Bar Plot is not currently implemented for unbundling" + ) def get_extraction_metrics(self, model_group_id: int): raise NotImplementedError("Not available for unbundling class") diff --git a/indico_toolkit/metrics/plotting.py b/indico_toolkit/metrics/plotting.py index 1fe45735..495be46d 100644 --- a/indico_toolkit/metrics/plotting.py +++ b/indico_toolkit/metrics/plotting.py @@ -1,10 +1,23 @@ -import plotly as py -import plotly.graph_objects as go from typing import Iterable, Union +try: + import plotly as py + import plotly.graph_objects as go + + _PLOTLY_INSTALLED = True +except ImportError as error: + _PLOTLY_INSTALLED = False + _IMPORT_ERROR = error + class Plotting: def __init__(self): + if not _PLOTLY_INSTALLED: + raise RuntimeError( + "plotting metrics requires additional dependencies: " + "`pip install indico-toolkit[metrics]`" + ) from _IMPORT_ERROR + self._plot_data = [] self.layout = go.Layout() @@ -20,9 +33,10 @@ def add_barplot_data( Args: x_data (Iterable[str]): Values/Groups that will go on X axis y_data (Iterable[Union[int, float]]): Values that will be plotted on y axis - name (str, optional): If you have multiple bar plot groups, the 
distinct name for this group, - e.g. "Model ID: 121". Defaults to "". - color (str, optional): Choose a color, can be css name or rgb like 'rgb(31, 119, 180)'. Defaults to "blue". + name (str, optional): If you have multiple bar plot groups, the distinct + name for this group, e.g. "Model ID: 121". Defaults to "". + color (str, optional): Choose a color, can be css name or rgb like + 'rgb(31, 119, 180)'. Defaults to "blue". """ self._plot_data.append( go.Bar(x=x_data, y=y_data, name=name, marker=dict(color=color)) @@ -40,9 +54,10 @@ def add_line_data( Args: x_data (Iterable[str]): Values/Groups that will go on X axis y_data (Iterable[Union[int, float]]): Values that will be plotted on y axis - name (str, optional): If you have multiple line plot groups, the distinct name for this group, - e.g. "Model ID: 121". Defaults to "". - color (str, optional): Choose a color, can be css name or rgb like 'rgb(31, 119, 180)'. Defaults to "blue". + name (str, optional): If you have multiple line plot groups, the distinct + name for this group, e.g. "Model ID: 121". Defaults to "". + color (str, optional): Choose a color, can be css name or rgb like + 'rgb(31, 119, 180)'. Defaults to "blue". """ self._plot_data.append( go.Scatter(x=x_data, y=y_data, name=name, marker=dict(color=color)) @@ -58,9 +73,11 @@ def define_layout( """ Add labels to your visualization Args: - yaxis_title (str, optional): Title for the y axis, e.g. "F1 Score". Defaults to "". + yaxis_title (str, optional): Title for the y axis, e.g. "F1 Score". + Defaults to "". xaxis_title (str, optional): Title for the x axis. Defaults to "". - legend_title (str, optional): A title for the plot's legend e.g. "Model IDs". Defaults to "". + legend_title (str, optional): A title for the plot's legend + e.g. "Model IDs". Defaults to "". plot_title (str, optional): A title above the plot. Defaults to "". 
""" self.layout = go.Layout( @@ -72,8 +89,10 @@ def define_layout( def plot(self, path_to_write_plot: str): """ - Write an html plot to disc. Will also open the plot automatically in your browser, where - you will interactive functionality and the ability to download a copy as a PNG as well. + Write an html plot to disc. Will also open the plot automatically in your + browser, where you will interactive functionality and the ability to download a + copy as a PNG as well. + Args: path_to_write_plot (str): where you want to write plot, e.g. "./myplot.html" """ diff --git a/indico_toolkit/ocr/__init__.py b/indico_toolkit/ocr/__init__.py index ec281fbe..fbf15bf5 100644 --- a/indico_toolkit/ocr/__init__.py +++ b/indico_toolkit/ocr/__init__.py @@ -1,3 +1,9 @@ +from .customocr_object import CustomOcr from .ondoc_object import OnDoc from .standard_object import StandardOcr -from .customocr_object import CustomOcr + +__all__ = ( + "CustomOcr", + "OnDoc", + "StandardOcr", +) diff --git a/indico_toolkit/ocr/customocr_object.py b/indico_toolkit/ocr/customocr_object.py index 2cee6d4e..78e1c79c 100644 --- a/indico_toolkit/ocr/customocr_object.py +++ b/indico_toolkit/ocr/customocr_object.py @@ -3,13 +3,14 @@ class CustomOcr: """ - CustomOcr is a helper class for the raw preset config OCR results. Enables easy extraction - of full text and page-level text. + CustomOcr is a helper class for the raw preset config OCR results. Enables easy + extraction of full text and page-level text. 
""" def __init__(self, customocr: Union[List[dict], dict]): """ - customocr Union[List[dict], dict]: result object from indico.queries.DocumentExtraction + customocr Union[List[dict], dict]: result object from + indico.queries.DocumentExtraction """ self.customocr = customocr @@ -26,7 +27,7 @@ def full_text(self) -> str: elif isinstance(self.customocr, list) and "pages" in self.customocr[0]: if "text" in self.customocr[0]["pages"][0]: return "\n".join(page["pages"][0]["text"] for page in self.customocr) - raise Exception(f"JSON configuration setting does not have full text.") + raise RuntimeError("JSON configuration setting does not have full text.") @property def page_texts(self) -> List[str]: @@ -38,4 +39,4 @@ def page_texts(self) -> List[str]: elif isinstance(self.customocr, list) and "pages" in self.customocr[0]: if "text" in self.customocr[0]["pages"][0]: return [page["pages"][0]["text"] for page in self.customocr] - raise Exception(f"JSON configuration setting does not have page-level text.") + raise RuntimeError("JSON configuration setting does not have page-level text.") diff --git a/indico_toolkit/ocr/ondoc_object.py b/indico_toolkit/ocr/ondoc_object.py index 59c7d370..1f5b741e 100644 --- a/indico_toolkit/ocr/ondoc_object.py +++ b/indico_toolkit/ocr/ondoc_object.py @@ -1,17 +1,18 @@ +import statistics from typing import List -import numpy as np class OnDoc: """ - OnDoc is a helper class for the raw "ondocument" preset confid OCR result. Enables easy extraction - of common datapoints into usable objects. "ondocument" is the default extraction config on the - Indico platform. + OnDoc is a helper class for the raw "ondocument" preset confid OCR result. Enables + easy extraction of common datapoints into usable objects. "ondocument" is the + default extraction config on the Indico platform. 
""" def __init__(self, ondoc: List[dict]): """ - ondoc {List[dict]}: ondocument result object from indico.queries.DocumentExtraction + ondoc {List[dict]}: ondocument result object from + indico.queries.DocumentExtraction """ self.ondoc = ondoc @@ -73,12 +74,12 @@ def ocr_confidence(self, metric="mean") -> float: metric {str}: options are "mean" or "median" """ if metric not in ("mean", "median"): - raise Exception( + raise RuntimeError( f"Metric value must be either mean or median, not '{metric}'" ) if "confidence" not in self.ondoc[0]["chars"][0].keys(): - raise Exception( + raise RuntimeError( "You are likely using an old SDK version, confidence is not included" ) @@ -88,5 +89,5 @@ def ocr_confidence(self, metric="mean") -> float: for character in page["chars"] ] if metric == "mean": - return np.mean(confidence) - return np.median(confidence) \ No newline at end of file + return statistics.mean(confidence) + return statistics.median(confidence) diff --git a/indico_toolkit/ocr/standard_object.py b/indico_toolkit/ocr/standard_object.py index f759611c..52ca1f73 100644 --- a/indico_toolkit/ocr/standard_object.py +++ b/indico_toolkit/ocr/standard_object.py @@ -3,8 +3,8 @@ class StandardOcr: """ - StandardOcr is a helper class for the raw "standard" preset config OCR result. Enables easy extraction - of common datapoints into usable objects. + StandardOcr is a helper class for the raw "standard" preset config OCR result. + Enables easy extraction of common datapoints into usable objects. 
""" def __init__(self, standardocr: dict): @@ -39,7 +39,11 @@ def block_texts(self) -> List[str]: """ Return list of block-level text """ - return [block["text"] for page in self.standardocr["pages"] for block in page["blocks"]] + return [ + block["text"] + for page in self.standardocr["pages"] + for block in page["blocks"] + ] @property def total_pages(self) -> int: diff --git a/indico_toolkit/pipelines/__init__.py b/indico_toolkit/pipelines/__init__.py index 8c2e98d4..34693950 100644 --- a/indico_toolkit/pipelines/__init__.py +++ b/indico_toolkit/pipelines/__init__.py @@ -1 +1,3 @@ -from .file_processing import FileProcessing \ No newline at end of file +from .file_processing import FileProcessing + +__all__ = ("FileProcessing",) diff --git a/indico_toolkit/pipelines/file_processing.py b/indico_toolkit/pipelines/file_processing.py index d400e8ed..508f8063 100644 --- a/indico_toolkit/pipelines/file_processing.py +++ b/indico_toolkit/pipelines/file_processing.py @@ -1,15 +1,16 @@ -import os import json -from os.path import isfile, isdir -from pathlib import Path -from typing import List, Tuple, Union, Iterable +import os import shutil -import tempfile +from os.path import isfile +from pathlib import Path +from typing import Iterable, List, Tuple, Union + class FileProcessing: """ Class to support common file processing operations """ + def __init__(self, file_paths: List[str] = None): if file_paths is None: file_paths = [] @@ -24,9 +25,11 @@ def get_file_paths_from_dir( ): """ Recursively find all file in specified types within a target directory + Args: path_to_dir (str): root directory containing files - accepted_types (Tuple[str], optional): Valid extensions types to process . Defaults to ("pdf", "tiff", "tif", "doc", "docx"). + accepted_types (Tuple[str], optional): Valid extensions types to process. + Defaults to ("pdf", "tiff", "tif", "doc", "docx"). recursive_search (bool): search sub directories as well. Defaults to False. 
Raises: @@ -38,16 +41,17 @@ def get_file_paths_from_dir( self._non_recursive_file_search(path_to_dir, accepted_types) if len(self.file_paths) == 0: - raise Exception( + raise RuntimeError( f"There are no files ending with {accepted_types} in {path_to_dir}" ) print( - f"Found {len(self.file_paths)} valid files and {len(self.invalid_suffix_paths)} paths with invalid suffixes." + f"Found {len(self.file_paths)} valid files and " + f"{len(self.invalid_suffix_paths)} paths with invalid suffixes." ) def move_all_file_paths( - self, + self, origin_dir: str, destination_dir: str, accepted_types: Tuple[str], @@ -60,14 +64,12 @@ def move_all_file_paths( initial_filepath = Path(file) file_to_be_moved = initial_filepath.name new_path_name = destination_dir / file_to_be_moved - if copy_files == False: + if not copy_files: initial_filepath.rename(new_path_name) - else: - shutil.copyfile(initial_filepath, new_path_name) + else: + shutil.copyfile(initial_filepath, new_path_name) else: - raise Exception( - f'{destination_dir} is not a valid directory' - ) + raise RuntimeError(f"{destination_dir} is not a valid directory") def batch_files(self, batch_size: int = 20) -> List[str]: for i in range(0, len(self.file_paths), batch_size): @@ -77,13 +79,17 @@ def remove_files_if_processed(self, processed_files: Iterable[str]): """ Removes files from self.file_paths if they are part of provided file iterable Args: - processed_files (Iterable[str]): iterable of file names, NOT full paths, e.g. ["invoice.pdf",] + processed_files (Iterable[str]): iterable of file names, NOT full paths, + e.g. 
["invoice.pdf",] """ unprocessed_filepaths = [] for filepath in self.file_paths: if self.file_name_from_path(filepath) not in processed_files: unprocessed_filepaths.append(filepath) - print(f"Removing {len(self.file_paths) - len(unprocessed_filepaths)} files from file_paths") + print( + f"Removing {len(self.file_paths) - len(unprocessed_filepaths)} " + "files from file_paths" + ) self.file_paths = unprocessed_filepaths @staticmethod @@ -100,7 +106,7 @@ def parent_directory_of_filepaths(self) -> List[str]: return [Path(i).parent.name for i in self.file_paths] @staticmethod - def join_paths(start_path:str, end_path: str) -> str: + def join_paths(start_path: str, end_path: str) -> str: return os.path.join(start_path, end_path) @staticmethod @@ -114,7 +120,7 @@ def get_file_path_suffix(filepath: str) -> str: @staticmethod def file_name_from_path(filepath: str) -> str: return Path(filepath).name - + @staticmethod def get_parent_path(filepath: str) -> str: return str(Path(filepath).parent) @@ -148,4 +154,4 @@ def _non_recursive_file_search(self, path_to_dir: str, accepted_types: Tuple[str def _check_acceptable_suffix(string: str, accepted_suffixes: Tuple[str]) -> bool: if string.lower().endswith(accepted_suffixes): return True - return False \ No newline at end of file + return False diff --git a/tests/auto_review/__init__.py b/indico_toolkit/py.typed similarity index 100% rename from tests/auto_review/__init__.py rename to indico_toolkit/py.typed diff --git a/indico_toolkit/results/model.py b/indico_toolkit/results/model.py old mode 100755 new mode 100644 diff --git a/indico_toolkit/retry.py b/indico_toolkit/retry.py index 24362344..4acd9c44 100644 --- a/indico_toolkit/retry.py +++ b/indico_toolkit/retry.py @@ -14,12 +14,6 @@ InnerReturnType = TypeVar("InnerReturnType") -class MaxRetriesExceeded(Exception): - """ - Raised when a function has retried more than `count` number of times. 
- """ - - def retry( *errors: "type[Exception]", count: int = 4, @@ -30,15 +24,14 @@ def retry( """ Decorate a function or coroutine to retry when it raises specified errors, apply exponential backoff and jitter to the wait time, - and raise `MaxRetriesExceeded` after it retries too many times. + and raise the last error if it retries too many times. By default, the decorated function or coroutine will be retried up to 4 times over - the course of ~2 minutes (waiting 1, 4, 16, and 64 seconds; plus up to 50% jitter) - before raising `MaxRetriesExceeded` from the last error. + the course of ~2 minutes (waiting 1, 4, 16, and 64 seconds; plus up to 50% jitter). Arguments: errors: Retry the function when it raises one of these errors. - count: Retry the function this many times before raising `MaxRetriesExceeded`. + count: Retry the function this many times. wait: Wait this many seconds after the first error before retrying. backoff: Multiply the wait time by this amount for each additional error. 
jitter: Add a random amount of time (up to this percent as a decimal) @@ -74,11 +67,9 @@ async def retrying_coroutine( # type: ignore[return] for times_retried in range(count + 1): try: return await decorated(*args, **kwargs) # type: ignore[no-any-return] - except errors as error: - last_error = error - - if times_retried >= count: - raise MaxRetriesExceeded() from last_error + except errors: + if times_retried >= count: + raise await asyncio.sleep(wait_time(times_retried)) @@ -92,11 +83,9 @@ def retrying_function( # type: ignore[return] for times_retried in range(count + 1): try: return decorated(*args, **kwargs) - except errors as error: - last_error = error - - if times_retried >= count: - raise MaxRetriesExceeded() from last_error + except errors: + if times_retried >= count: + raise time.sleep(wait_time(times_retried)) diff --git a/indico_toolkit/snapshots/__init__.py b/indico_toolkit/snapshots/__init__.py index 52a3e4f7..d467bfc5 100644 --- a/indico_toolkit/snapshots/__init__.py +++ b/indico_toolkit/snapshots/__init__.py @@ -1 +1,3 @@ from .snapshot import Snapshot + +__all__ = ("Snapshot",) diff --git a/indico_toolkit/snapshots/snapshot.py b/indico_toolkit/snapshots/snapshot.py index d20ea62c..26af8f2f 100644 --- a/indico_toolkit/snapshots/snapshot.py +++ b/indico_toolkit/snapshots/snapshot.py @@ -1,12 +1,17 @@ -from __future__ import ( - annotations, -) # from 3.10, don't need for same class reference in class method -from typing import List, Union, Tuple -import pandas as pd -import os import json +import os from json import JSONDecodeError -from indico_toolkit import ToolkitInstantiationError, ToolkitInputError +from typing import List, Tuple, Union + +from ..errors import ToolkitInputError, ToolkitInstantiationError + +try: + import pandas as pd + + _PANDAS_INSTALLED = True +except ImportError as error: + _PANDAS_INSTALLED = False + _IMPORT_ERROR = error # TODO: add functionality for classification snapshots @@ -24,10 +29,19 @@ def __init__( Args: 
path_to_snapshot (str): path to Snapshot CSV - text_col (str, optional): Column with text, will be inferred if not provided. Defaults to None. - label_col (str, optional): Column with labels, will be inferred if not provided. Defaults to None. - file_name_col (str, optional): Column with file names, will be inferred if not provided. Defaults to None. + text_col (str, optional): Column with text, will be inferred if not + provided. Defaults to None. + label_col (str, optional): Column with labels, will be inferred if not + provided. Defaults to None. + file_name_col (str, optional): Column with file names, will be inferred if + not provided. Defaults to None. """ + if not _PANDAS_INSTALLED: + raise RuntimeError( + "snapshots require additional dependencies: " + "`pip install indico-toolkit[snapshots]`" + ) from _IMPORT_ERROR + self.path_to_snapshot = path_to_snapshot self.df: pd.DataFrame = pd.read_csv(path_to_snapshot) self.label_col = label_col @@ -86,16 +100,19 @@ def standardize_column_names( def drop_unneeded_columns(self, columns_to_drop: List[str] = None): """ - Keep only text, labels, and file name columns or specify columns to drop by passing them in as a list. + Keep only text, labels, and file name columns or specify columns to drop by + passing them in as a list. """ if columns_to_drop: self.df.drop(labels=columns_to_drop, axis=1, inplace=True) else: self.df = self.df[[self.label_col, self.text_col, self.file_name_col]] - def append(self, snap_to_add: Snapshot): + def append(self, snap_to_add: "Snapshot"): """ - Append the rows from another Snapshot to this snapshot. Ensure column names are standardized beforehand. + Append the rows from another Snapshot to this snapshot. Ensure column names are + standardized beforehand. 
+ Args: snap_to_add (Snapshot): Snapshot to add """ @@ -115,16 +132,17 @@ def get_extraction_label_names(self): def merge_by_file_name( self, - snap_to_merge: Snapshot, + snap_to_merge: "Snapshot", ensure_identical_text: bool = True, ): """ - Merge extraction labels for identical files. Merge is 'left' and file names / rows only present - in 'snap_to_merge' are excluded. + Merge extraction labels for identical files. Merge is 'left' and file names / + rows only present in 'snap_to_merge' are excluded. + Args: snap_to_merge (Snapshot): Snapshot you want to merge - ensure_identical_text (bool, optional): Require document text to be identical for common file name. - Defaults to True. + ensure_identical_text (bool, optional): Require document text to be + identical for common file name. Defaults to True. """ self._assert_key_column_names_match(snap_to_merge) suffix = "_to_merge" @@ -177,8 +195,8 @@ def get_all_labeled_text( Get all of the text that was tagged for a given label Args: label_name (str): name of the label - return_per_document (bool, optional): return a list per document or one list with everything. - Defaults to False. + return_per_document (bool, optional): return a list per document or one list + with everything. Defaults to False. """ available_labels = self.get_extraction_label_names() if label_name not in available_labels: @@ -202,13 +220,15 @@ def split_and_write_to_csv( self, output_dir: str, num_splits: int = 5, output_base_name: str = "split_num" ) -> None: """ - For large files that may face memory constraints, split the file into multiple CSVs and write - to disk. + For large files that may face memory constraints, split the file into multiple + CSVs and write to disk. + Args: output_dir (str): Location where split files will be written. num_splits (int, optional): The number of splits of the CSV. Defaults to 5. - output_base_name (str, optional): The base name of the split CSVs: Defaults to "split_num". 
- So files would be "split_num_1.csv", "split_num_2.csv", etc. + output_base_name (str, optional): The base name of the split CSVs: + Defaults to "split_num". So files would be "split_num_1.csv", + "split_num_2.csv", etc. """ split_length = self.number_of_samples // num_splits rows_taken = 0 @@ -226,9 +246,10 @@ def update_label_col_format(self, task_type: str = "annotation"): """ Modifies label column to updated format with spans. Args: - task_type (str): Task type to specifiy df as (annotation = extraction, classification = classification) + task_type (str): Task type to specifiy df as + (annotation = extraction, classification = classification) - NOTE: page_num in the span is currently set to None. + Note: page_num in the span is currently set to None. """ updated_column = [] for label_set in self.df[self.label_col]: @@ -245,7 +266,7 @@ def update_label_col_format(self, task_type: str = "annotation"): updated_column.append({"task_type": task_type, "targets": updated_targets}) self.df[self.label_col] = updated_column - def __eq__(self, other: Snapshot): + def __eq__(self, other: "Snapshot"): """ Check if two snapshots can be merged based on common column names """ @@ -265,7 +286,7 @@ def _convert_col_from_json(self): f"{self.label_col} doesn't contain valid extraction labels" ) - def _assert_key_column_names_match(self, snapshot: Snapshot): + def _assert_key_column_names_match(self, snapshot: "Snapshot"): try: assert self == snapshot except AssertionError: diff --git a/indico_toolkit/structure/__init__.py b/indico_toolkit/structure/__init__.py index e69de29b..dcfd19eb 100644 --- a/indico_toolkit/structure/__init__.py +++ b/indico_toolkit/structure/__init__.py @@ -0,0 +1,3 @@ +from .create_structure import Structure + +__all__ = ("Structure",) diff --git a/indico_toolkit/structure/create_structure.py b/indico_toolkit/structure/create_structure.py index c416ee8b..1ab82675 100644 --- a/indico_toolkit/structure/create_structure.py +++ 
b/indico_toolkit/structure/create_structure.py @@ -1,27 +1,26 @@ -import tempfile import json -import shutil import os +import shutil +import tempfile from typing import List from indico.queries import ( + AddModelGroupComponent, CreateDataset, CreateWorkflow, - NewLabelsetArguments, - AddModelGroupComponent, - GetWorkflow, GetDataset, + GetWorkflow, + NewLabelsetArguments, ) from indico.types import ( - OcrEngine, Dataset, - Workflow, + OcrEngine, TableReadOrder, + Workflow, ) -from indico.types import Workflow -from indico_toolkit.errors import ToolkitInputError -from .queries import * +from ..errors import ToolkitInputError +from .queries import GetExampleIds, GetTeachDetails, LabelTeachTask from .utils import ModelTaskType @@ -42,7 +41,8 @@ def create_dataset( Args: name_of_dataset (str): Name of the created dataset file_path (str): Path of the file to copy. - read_api (bool, optional): OCR Engine used for the dataset. Defaults to True=READ_API / False=OMNIPAGE + read_api (bool, optional): OCR Engine used for the dataset. + Defaults to True=READ_API / False=OMNIPAGE Kwargs: Advanced OCR settings """ @@ -65,7 +65,8 @@ def create_dataset( else: if arg == "table_read_order" and kwargs[arg] not in ["row", "column"]: raise ToolkitInputError( - f"Keyword argument {arg} got an unexpected value of {kwargs[arg]}, expected value of either 'row' or 'column'" + f"Keyword argument {arg} got an unexpected value of " + f"{kwargs[arg]}, expected value of either 'row' or 'column'" ) omnipage_settings.update({arg: kwargs[arg]}) @@ -101,13 +102,16 @@ def create_duplicate_dataset( **kwargs, ) -> Dataset: """ - Creates a dataset w/ duplicate instances of 1 file, historically used to create a spoofed demo. + Creates a dataset w/ duplicate instances of 1 file, historically used to create + a spoofed demo. Args: file_path (str): Path of the file to copy. name_of_dataset (str): Name of the created dataset - times_to_copy_files (int, optional): Amount of times to copy the file. 
Defaults to 55. - read_api (bool, optional): OCR Engine used for the dataset. Defaults to True=READ_API / False=OMNIPAGE + times_to_copy_files (int, optional): Amount of times to copy the file. + Defaults to 55. + read_api (bool, optional): OCR Engine used for the dataset. + Defaults to True=READ_API / False=OMNIPAGE Kwargs: Advanced OCR settings """ @@ -180,7 +184,8 @@ def add_teach_task( } if model_type not in model_map.keys(): raise ToolkitInputError( - f"{model_type} not found. Available options include {[model for model in model_map.keys()]}" + f"{model_type} not found. Available options " + f"include {[model for model in model_map.keys()]}" ) workflow = self.client.call(GetWorkflow(workflow_id)) if not prev_comp_id: @@ -206,7 +211,8 @@ def add_teach_task( ) ) print( - f"Newly created teach task with teach_id: {workflow.components[-1].model_group.questionnaire_id}" + "Newly created teach task with teach_id: " + f"{workflow.components[-1].model_group.questionnaire_id}" ) return workflow diff --git a/indico_toolkit/structure/queries.py b/indico_toolkit/structure/queries.py index 95505966..671da24b 100644 --- a/indico_toolkit/structure/queries.py +++ b/indico_toolkit/structure/queries.py @@ -1,5 +1,6 @@ from indico.queries import GraphQLRequest + class GetTeachDetails(GraphQLRequest): GET_TEACH_DETAILS = """ query getCrowdlabelQuestionnaire($teach_task_id: Int!) 
{ @@ -33,12 +34,31 @@ def __init__(self, *, teach_task_id: int): def process_response(self, response): return super().process_response(response) + class GetExampleIds(GraphQLRequest): GET_EXAMPLES = """ - query getExamplesList($modelGroupId: Int!, $filters: ExampleFilter, $skip: Int, $before: Int, $after: Int, $limit: Int, $desc: Boolean, $orderBy: ExampleOrder) { + query getExamplesList( + $modelGroupId: Int!, + $filters: ExampleFilter, + $skip: Int, + $before: Int, + $after: Int, + $limit: Int, + $desc: Boolean, + $orderBy: ExampleOrder + ) { modelGroup(modelGroupId: $modelGroupId) { id - pagedExamples(filters: $filters, skip: $skip, before: $before, after: $after, limit: $limit, desc: $desc, orderBy: $orderBy) { + pagedExamples( + filters: $filters, + skip: $skip, + before: $before, + after: $after, + limit: $limit, + desc: $desc, + orderBy: + $orderBy + ) { examples { id datarowId @@ -68,6 +88,7 @@ def __init__(self, *, model_group_id: int, limit: int): def process_response(self, response): return super().process_response(response) + class LabelTeachTask(GraphQLRequest): LABEL_TASK = """ mutation submitQuestionnaireExample @@ -95,4 +116,4 @@ def __init__(self, *, label_set_id: int, labels, model_group_id: int): ) def process_response(self, response): - return super().process_response(response) \ No newline at end of file + return super().process_response(response) diff --git a/indico_toolkit/structure/utils.py b/indico_toolkit/structure/utils.py index d7111d57..58bbb285 100644 --- a/indico_toolkit/structure/utils.py +++ b/indico_toolkit/structure/utils.py @@ -1,7 +1,11 @@ from enum import Enum + class ModelTaskType(Enum): - """Issues with SDK 7/15/22-- need to hard code this here, new SDK release with fix is imminent""" + """ + Issues with SDK 7/15/22-- need to hard code this here, new SDK release with fix is + imminent + """ CLASSIFICATION = 1 FORM_EXTRACTION = 2 @@ -9,4 +13,4 @@ class ModelTaskType(Enum): CLASSIFICATION_MULTIPLE = 4 REGRESSION = 5 ANNOTATION 
= 6 - CLASSIFICATION_UNBUNDLING = 7 \ No newline at end of file + CLASSIFICATION_UNBUNDLING = 7 diff --git a/indico_toolkit/types/__init__.py b/indico_toolkit/types/__init__.py index 2f9a6af0..80009c98 100644 --- a/indico_toolkit/types/__init__.py +++ b/indico_toolkit/types/__init__.py @@ -1,4 +1,12 @@ -from .workflow_object import WorkflowResult -from .predictions import Predictions -from .extractions import Extractions from .classification import Classification, ClassificationMGP +from .extractions import Extractions +from .predictions import Predictions +from .workflow_object import WorkflowResult + +__all__ = ( + "Classification", + "ClassificationMGP", + "Extractions", + "Predictions", + "WorkflowResult", +) diff --git a/indico_toolkit/types/classification.py b/indico_toolkit/types/classification.py index 63eca610..f79366dc 100644 --- a/indico_toolkit/types/classification.py +++ b/indico_toolkit/types/classification.py @@ -1,8 +1,15 @@ -from typing import Dict -import pandas as pd from operator import itemgetter +from typing import Dict + +from ..pipelines import FileProcessing -from indico_toolkit.pipelines import FileProcessing +try: + import pandas as pd + + _PANDAS_INSTALLED = True +except ImportError as error: + _PANDAS_INSTALLED = False + _IMPORT_ERROR = error class Classification: @@ -38,6 +45,12 @@ def confidence_scores(self) -> Dict[str, float]: def to_csv( self, save_path, filename: str = "", append_if_exists: bool = True ) -> None: + if not _PANDAS_INSTALLED: + raise RuntimeError( + "saving predictions to CSV requires additional dependencies: " + "`pip install indico-toolkit[predictions]`" + ) from _IMPORT_ERROR + results = {filename: self._pred} df = pd.DataFrame(results).transpose() df["filename"] = filename diff --git a/indico_toolkit/types/extractions.py b/indico_toolkit/types/extractions.py index 51361bf0..f7ceef4c 100644 --- a/indico_toolkit/types/extractions.py +++ b/indico_toolkit/types/extractions.py @@ -1,9 +1,17 @@ -from typing import 
List, Dict, Set, Iterable, Union -from collections import defaultdict, Counter -import pandas as pd +from collections import Counter, defaultdict from copy import deepcopy -from indico_toolkit.pipelines import FileProcessing -from indico_toolkit import ToolkitInputError +from typing import Dict, Iterable, List, Set, Union + +from ..errors import ToolkitInputError +from ..pipelines import FileProcessing + +try: + import pandas as pd + + _PANDAS_INSTALLED = True +except ImportError as error: + _PANDAS_INSTALLED = False + _IMPORT_ERROR = error class Extractions: @@ -18,7 +26,8 @@ def __init__(self, predictions: List[dict]): @property def to_dict_by_label(self) -> Dict[str, list]: """ - Generate a dictionary where key is label string and value is list of all predictions of that label + Generate a dictionary where key is label string and value is list of all + predictions of that label """ prediction_label_map = defaultdict(list) for pred in self._preds: @@ -34,7 +43,8 @@ def remove_by_confidence(self, confidence: float = 0.95, labels: List[str] = Non Remove predictions that are less than given confidence Args: confidence (float, optional): confidence theshold. Defaults to 0.95. - labels (List[str], optional): Labels where this applies, if None applies to all. Defaults to None. + labels (List[str], optional): Labels where this applies, + if None applies to all. Defaults to None. 
""" high_conf_preds = [] for pred in self._preds: @@ -49,7 +59,8 @@ def remove_by_confidence(self, confidence: float = 0.95, labels: List[str] = Non def remove_except_max_confidence(self, labels: List[str]): """ - Removes all predictions except the highest confidence within each specified class + Removes all predictions except the highest confidence within each specified + class """ label_set = self.label_set for label in labels: @@ -62,7 +73,8 @@ def remove_except_max_confidence(self, labels: List[str]): def set_confidence_key_to_max_value(self, inplace: bool = True): """ - Overwite confidence dictionary to just max confidence float to make preds more readable. + Overwite confidence dictionary to just max confidence float to make preds more + readable. """ if inplace: self._set_confidence_key_to_max_value(self._preds) @@ -97,7 +109,8 @@ def remove_all_by_label(self, labels: Iterable[str]): def remove_human_added_predictions(self): """ - Remove predictions that were not added by the model (i.e. added by scripted or human review) + Remove predictions that were not added by the model (i.e. added by scripted or + human review) """ self._preds = [ i for i in self._preds if not self.is_manually_added_prediction(i) @@ -160,7 +173,7 @@ def exist_multiple_vals_for_label(self, label: str) -> bool: def get_most_common_text_value(self, label: str) -> Union[str, None]: """ - Return the most common text value. If there is a tie- returns None. + Return the most common text value. If there is a tie- returns None. """ if label not in self.label_set: raise ToolkitInputError(f"There are no predictions for: '{label}'") @@ -194,8 +207,15 @@ def to_csv( save_path (str): path to write CSV include_start_end (bool): include columns for start/end indexes append_if_exists (bool): if path exists, append to that CSV - filename (str, optional): the file where the preds were derived from. Defaults to "". + filename (str, optional): the file where the preds were derived from. + Defaults to "". 
""" + if not _PANDAS_INSTALLED: + raise RuntimeError( + "saving predictions to CSV requires additional dependencies: " + "`pip install indico-toolkit[predictions]`" + ) from _IMPORT_ERROR + preds = self.set_confidence_key_to_max_value(inplace=False) df = pd.DataFrame(preds) if not include_start_end: diff --git a/indico_toolkit/types/predictions.py b/indico_toolkit/types/predictions.py index 7c18df54..747f777b 100644 --- a/indico_toolkit/types/predictions.py +++ b/indico_toolkit/types/predictions.py @@ -1,26 +1,28 @@ -from typing import List, Dict, Set - -from indico_toolkit.errors import ToolkitInputError -from .extractions import Extractions +from ..errors import ToolkitInputError from .classification import Classification, ClassificationMGP +from .extractions import Extractions class Predictions: """ Factory class for predictions """ + @staticmethod def get_obj(predictions): """ Returns: Extractions object or Classification object depending on predictions type """ - if type(predictions) == list: + if isinstance(predictions, list): return Extractions(predictions) - elif type(predictions) == dict: + elif isinstance(predictions, dict): if "label" in predictions: return Classification(predictions) else: return ClassificationMGP(predictions) else: - raise ToolkitInputError(f"Unable to process predictions with type {type(predictions)}. Predictions: {predictions}") + raise ToolkitInputError( + f"Unable to process predictions with type {type(predictions)}. 
" + f"Predictions: {predictions}" + ) diff --git a/indico_toolkit/types/workflow_object.py b/indico_toolkit/types/workflow_object.py index 67401bc9..bd4fa305 100644 --- a/indico_toolkit/types/workflow_object.py +++ b/indico_toolkit/types/workflow_object.py @@ -1,7 +1,6 @@ from typing import List -from indico_toolkit import ToolkitInputError - +from ..errors import ToolkitInputError from .predictions import Predictions @@ -12,7 +11,8 @@ def __init__(self, result: dict, model_name: str = None): Args: result (dict): raw workflow result object - model_name (str, optional): Extraction/Classification model name . Defaults to None. + model_name (str, optional): Extraction/Classification model name. + Defaults to None. """ self.result = result self.model_name = model_name @@ -20,7 +20,8 @@ def __init__(self, result: dict, model_name: str = None): def _check_is_valid_model_name(self) -> None: if self.model_name not in self.available_model_names: raise ToolkitInputError( - f"{self.model_name} is not an available model name. Options: {self.available_model_names}" + f"{self.model_name} is not an available model name. " + f"Options: {self.available_model_names}" ) def __repr__(self): @@ -85,7 +86,8 @@ def _set_model_name(self): self._check_is_valid_model_name() elif len(self.available_model_names) > 1: raise ToolkitInputError( - f"Multiple models available, you must set self.model_name to one of {self.available_model_names}" + "Multiple models available, you must set self.model_name to one of " + f"{self.available_model_names}" ) else: self.model_name = self.available_model_names[0] diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 00000000..a205789e --- /dev/null +++ b/mypy.ini @@ -0,0 +1,76 @@ +[mypy] +# +# pyproject.toml contains the main configuration for mypy, which applies to existing +# modules that are type hinted and any new modules that are added. 
+# +# This file contains overrides to ignore errors in older modules and dependencies that +# aren't type hinted, with the intention that these errors are fixed over time so that +# this file can eventually be removed. +# +# To fix a module, pick one from this list and comment out its override. Run mypy and +# address the errors it finds, repeating until the module passes. Once it passes, +# remove the override from the list and commit the changes. +# + +[mypy-indico.*] +ignore_missing_imports = True + +[mypy-indico_toolkit.association.*] +ignore_errors = True + +[mypy-indico_toolkit.auto_populate.*] +ignore_errors = True + +[mypy-indico_toolkit.auto_review.*] +ignore_errors = True + +[mypy-indico_toolkit.etloutput.*] +ignore_errors = True + +[mypy-indico_toolkit.indico_wrapper.*] +ignore_errors = True + +[mypy-indico_toolkit.metrics.*] +ignore_errors = True + +[mypy-indico_toolkit.ocr.*] +ignore_errors = True + +[mypy-indico_toolkit.pipelines.*] +ignore_errors = True + +[mypy-indico_toolkit.polling.*] +ignore_errors = True + +[mypy-indico_toolkit.results.*] +ignore_errors = True + +[mypy-indico_toolkit.snapshots.*] +ignore_errors = True + +[mypy-indico_toolkit.structure.*] +ignore_errors = True + +[mypy-indico_toolkit.types.*] +ignore_errors = True + +[mypy-tests.association.*] +ignore_errors = true + +[mypy-tests.data.*] +ignore_errors = true + +[mypy-tests.integration.*] +ignore_errors = true + +[mypy-tests.metrics.*] +ignore_errors = true + +[mypy-tests.pipelines.*] +ignore_errors = true + +[mypy-tests.snapshots.*] +ignore_errors = true + +[mypy-tests.types.*] +ignore_errors = true diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 00000000..21c09953 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,2034 @@ +# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. 
+ +[[package]] +name = "aiodns" +version = "3.2.0" +description = "Simple DNS resolver for asyncio" +optional = false +python-versions = "*" +groups = ["main"] +markers = "sys_platform == \"linux\" or sys_platform == \"darwin\"" +files = [ + {file = "aiodns-3.2.0-py3-none-any.whl", hash = "sha256:e443c0c27b07da3174a109fd9e736d69058d808f144d3c9d56dbd1776964c5f5"}, + {file = "aiodns-3.2.0.tar.gz", hash = "sha256:62869b23409349c21b072883ec8998316b234c9a9e36675756e8e317e8768f72"}, +] + +[package.dependencies] +pycares = ">=4.0.0" + +[[package]] +name = "aiohappyeyeballs" +version = "2.4.8" +description = "Happy Eyeballs for asyncio" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "aiohappyeyeballs-2.4.8-py3-none-any.whl", hash = "sha256:6cac4f5dd6e34a9644e69cf9021ef679e4394f54e58a183056d12009e42ea9e3"}, + {file = "aiohappyeyeballs-2.4.8.tar.gz", hash = "sha256:19728772cb12263077982d2f55453babd8bec6a052a926cd5c0c42796da8bf62"}, +] + +[[package]] +name = "aiohttp" +version = "3.11.13" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "aiohttp-3.11.13-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a4fe27dbbeec445e6e1291e61d61eb212ee9fed6e47998b27de71d70d3e8777d"}, + {file = "aiohttp-3.11.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9e64ca2dbea28807f8484c13f684a2f761e69ba2640ec49dacd342763cc265ef"}, + {file = "aiohttp-3.11.13-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9840be675de208d1f68f84d578eaa4d1a36eee70b16ae31ab933520c49ba1325"}, + {file = "aiohttp-3.11.13-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28a772757c9067e2aee8a6b2b425d0efaa628c264d6416d283694c3d86da7689"}, + {file = "aiohttp-3.11.13-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b88aca5adbf4625e11118df45acac29616b425833c3be7a05ef63a6a4017bfdb"}, + {file = 
"aiohttp-3.11.13-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ce10ddfbe26ed5856d6902162f71b8fe08545380570a885b4ab56aecfdcb07f4"}, + {file = "aiohttp-3.11.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa48dac27f41b36735c807d1ab093a8386701bbf00eb6b89a0f69d9fa26b3671"}, + {file = "aiohttp-3.11.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:89ce611b1eac93ce2ade68f1470889e0173d606de20c85a012bfa24be96cf867"}, + {file = "aiohttp-3.11.13-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:78e4dd9c34ec7b8b121854eb5342bac8b02aa03075ae8618b6210a06bbb8a115"}, + {file = "aiohttp-3.11.13-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:66047eacbc73e6fe2462b77ce39fc170ab51235caf331e735eae91c95e6a11e4"}, + {file = "aiohttp-3.11.13-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5ad8f1c19fe277eeb8bc45741c6d60ddd11d705c12a4d8ee17546acff98e0802"}, + {file = "aiohttp-3.11.13-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:64815c6f02e8506b10113ddbc6b196f58dbef135751cc7c32136df27b736db09"}, + {file = "aiohttp-3.11.13-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:967b93f21b426f23ca37329230d5bd122f25516ae2f24a9cea95a30023ff8283"}, + {file = "aiohttp-3.11.13-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cf1f31f83d16ec344136359001c5e871915c6ab685a3d8dee38e2961b4c81730"}, + {file = "aiohttp-3.11.13-cp310-cp310-win32.whl", hash = "sha256:00c8ac69e259c60976aa2edae3f13d9991cf079aaa4d3cd5a49168ae3748dee3"}, + {file = "aiohttp-3.11.13-cp310-cp310-win_amd64.whl", hash = "sha256:90d571c98d19a8b6e793b34aa4df4cee1e8fe2862d65cc49185a3a3d0a1a3996"}, + {file = "aiohttp-3.11.13-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6b35aab22419ba45f8fc290d0010898de7a6ad131e468ffa3922b1b0b24e9d2e"}, + {file = "aiohttp-3.11.13-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f81cba651db8795f688c589dd11a4fbb834f2e59bbf9bb50908be36e416dc760"}, + 
{file = "aiohttp-3.11.13-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f55d0f242c2d1fcdf802c8fabcff25a9d85550a4cf3a9cf5f2a6b5742c992839"}, + {file = "aiohttp-3.11.13-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4bea08a6aad9195ac9b1be6b0c7e8a702a9cec57ce6b713698b4a5afa9c2e33"}, + {file = "aiohttp-3.11.13-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c6070bcf2173a7146bb9e4735b3c62b2accba459a6eae44deea0eb23e0035a23"}, + {file = "aiohttp-3.11.13-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:718d5deb678bc4b9d575bfe83a59270861417da071ab44542d0fcb6faa686636"}, + {file = "aiohttp-3.11.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f6b2c5b4a4d22b8fb2c92ac98e0747f5f195e8e9448bfb7404cd77e7bfa243f"}, + {file = "aiohttp-3.11.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:747ec46290107a490d21fe1ff4183bef8022b848cf9516970cb31de6d9460088"}, + {file = "aiohttp-3.11.13-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:01816f07c9cc9d80f858615b1365f8319d6a5fd079cd668cc58e15aafbc76a54"}, + {file = "aiohttp-3.11.13-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:a08ad95fcbd595803e0c4280671d808eb170a64ca3f2980dd38e7a72ed8d1fea"}, + {file = "aiohttp-3.11.13-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c97be90d70f7db3aa041d720bfb95f4869d6063fcdf2bb8333764d97e319b7d0"}, + {file = "aiohttp-3.11.13-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ab915a57c65f7a29353c8014ac4be685c8e4a19e792a79fe133a8e101111438e"}, + {file = "aiohttp-3.11.13-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:35cda4e07f5e058a723436c4d2b7ba2124ab4e0aa49e6325aed5896507a8a42e"}, + {file = "aiohttp-3.11.13-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:af55314407714fe77a68a9ccaab90fdb5deb57342585fd4a3a8102b6d4370080"}, + {file = "aiohttp-3.11.13-cp311-cp311-win32.whl", hash = 
"sha256:42d689a5c0a0c357018993e471893e939f555e302313d5c61dfc566c2cad6185"}, + {file = "aiohttp-3.11.13-cp311-cp311-win_amd64.whl", hash = "sha256:b73a2b139782a07658fbf170fe4bcdf70fc597fae5ffe75e5b67674c27434a9f"}, + {file = "aiohttp-3.11.13-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2eabb269dc3852537d57589b36d7f7362e57d1ece308842ef44d9830d2dc3c90"}, + {file = "aiohttp-3.11.13-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7b77ee42addbb1c36d35aca55e8cc6d0958f8419e458bb70888d8c69a4ca833d"}, + {file = "aiohttp-3.11.13-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55789e93c5ed71832e7fac868167276beadf9877b85697020c46e9a75471f55f"}, + {file = "aiohttp-3.11.13-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c929f9a7249a11e4aa5c157091cfad7f49cc6b13f4eecf9b747104befd9f56f2"}, + {file = "aiohttp-3.11.13-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d33851d85537bbf0f6291ddc97926a754c8f041af759e0aa0230fe939168852b"}, + {file = "aiohttp-3.11.13-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9229d8613bd8401182868fe95688f7581673e1c18ff78855671a4b8284f47bcb"}, + {file = "aiohttp-3.11.13-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:669dd33f028e54fe4c96576f406ebb242ba534dd3a981ce009961bf49960f117"}, + {file = "aiohttp-3.11.13-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c1b20a1ace54af7db1f95af85da530fe97407d9063b7aaf9ce6a32f44730778"}, + {file = "aiohttp-3.11.13-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5724cc77f4e648362ebbb49bdecb9e2b86d9b172c68a295263fa072e679ee69d"}, + {file = "aiohttp-3.11.13-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:aa36c35e94ecdb478246dd60db12aba57cfcd0abcad43c927a8876f25734d496"}, + {file = "aiohttp-3.11.13-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9b5b37c863ad5b0892cc7a4ceb1e435e5e6acd3f2f8d3e11fa56f08d3c67b820"}, + 
{file = "aiohttp-3.11.13-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:e06cf4852ce8c4442a59bae5a3ea01162b8fcb49ab438d8548b8dc79375dad8a"}, + {file = "aiohttp-3.11.13-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5194143927e494616e335d074e77a5dac7cd353a04755330c9adc984ac5a628e"}, + {file = "aiohttp-3.11.13-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:afcb6b275c2d2ba5d8418bf30a9654fa978b4f819c2e8db6311b3525c86fe637"}, + {file = "aiohttp-3.11.13-cp312-cp312-win32.whl", hash = "sha256:7104d5b3943c6351d1ad7027d90bdd0ea002903e9f610735ac99df3b81f102ee"}, + {file = "aiohttp-3.11.13-cp312-cp312-win_amd64.whl", hash = "sha256:47dc018b1b220c48089b5b9382fbab94db35bef2fa192995be22cbad3c5730c8"}, + {file = "aiohttp-3.11.13-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:9862d077b9ffa015dbe3ce6c081bdf35135948cb89116e26667dd183550833d1"}, + {file = "aiohttp-3.11.13-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fbfef0666ae9e07abfa2c54c212ac18a1f63e13e0760a769f70b5717742f3ece"}, + {file = "aiohttp-3.11.13-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:93a1f7d857c4fcf7cabb1178058182c789b30d85de379e04f64c15b7e88d66fb"}, + {file = "aiohttp-3.11.13-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba40b7ae0f81c7029583a338853f6607b6d83a341a3dcde8bed1ea58a3af1df9"}, + {file = "aiohttp-3.11.13-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b5b95787335c483cd5f29577f42bbe027a412c5431f2f80a749c80d040f7ca9f"}, + {file = "aiohttp-3.11.13-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7d474c5c1f0b9405c1565fafdc4429fa7d986ccbec7ce55bc6a330f36409cad"}, + {file = "aiohttp-3.11.13-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e83fb1991e9d8982b3b36aea1e7ad27ea0ce18c14d054c7a404d68b0319eebb"}, + {file = "aiohttp-3.11.13-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:4586a68730bd2f2b04a83e83f79d271d8ed13763f64b75920f18a3a677b9a7f0"}, + {file = "aiohttp-3.11.13-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fe4eb0e7f50cdb99b26250d9328faef30b1175a5dbcfd6d0578d18456bac567"}, + {file = "aiohttp-3.11.13-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:2a8a6bc19818ac3e5596310ace5aa50d918e1ebdcc204dc96e2f4d505d51740c"}, + {file = "aiohttp-3.11.13-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7f27eec42f6c3c1df09cfc1f6786308f8b525b8efaaf6d6bd76c1f52c6511f6a"}, + {file = "aiohttp-3.11.13-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:2a4a13dfbb23977a51853b419141cd0a9b9573ab8d3a1455c6e63561387b52ff"}, + {file = "aiohttp-3.11.13-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:02876bf2f69b062584965507b07bc06903c2dc93c57a554b64e012d636952654"}, + {file = "aiohttp-3.11.13-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b992778d95b60a21c4d8d4a5f15aaab2bd3c3e16466a72d7f9bfd86e8cea0d4b"}, + {file = "aiohttp-3.11.13-cp313-cp313-win32.whl", hash = "sha256:507ab05d90586dacb4f26a001c3abf912eb719d05635cbfad930bdbeb469b36c"}, + {file = "aiohttp-3.11.13-cp313-cp313-win_amd64.whl", hash = "sha256:5ceb81a4db2decdfa087381b5fc5847aa448244f973e5da232610304e199e7b2"}, + {file = "aiohttp-3.11.13-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:51c3ff9c7a25f3cad5c09d9aacbc5aefb9267167c4652c1eb737989b554fe278"}, + {file = "aiohttp-3.11.13-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e271beb2b1dabec5cd84eb488bdabf9758d22ad13471e9c356be07ad139b3012"}, + {file = "aiohttp-3.11.13-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e9eb7e5764abcb49f0e2bd8f5731849b8728efbf26d0cac8e81384c95acec3f"}, + {file = "aiohttp-3.11.13-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baae005092e3f200de02699314ac8933ec20abf998ec0be39448f6605bce93df"}, + {file = "aiohttp-3.11.13-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:1982c98ac62c132d2b773d50e2fcc941eb0b8bad3ec078ce7e7877c4d5a2dce7"}, + {file = "aiohttp-3.11.13-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2b25b2eeb35707113b2d570cadc7c612a57f1c5d3e7bb2b13870fe284e08fc0"}, + {file = "aiohttp-3.11.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b27961d65639128336b7a7c3f0046dcc62a9443d5ef962e3c84170ac620cec47"}, + {file = "aiohttp-3.11.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a01fe9f1e05025eacdd97590895e2737b9f851d0eb2e017ae9574d9a4f0b6252"}, + {file = "aiohttp-3.11.13-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fa1fb1b61881c8405829c50e9cc5c875bfdbf685edf57a76817dfb50643e4a1a"}, + {file = "aiohttp-3.11.13-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:25de43bb3cf83ad83efc8295af7310219af6dbe4c543c2e74988d8e9c8a2a917"}, + {file = "aiohttp-3.11.13-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fe7065e2215e4bba63dc00db9ae654c1ba3950a5fff691475a32f511142fcddb"}, + {file = "aiohttp-3.11.13-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:7836587eef675a17d835ec3d98a8c9acdbeb2c1d72b0556f0edf4e855a25e9c1"}, + {file = "aiohttp-3.11.13-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:85fa0b18558eb1427090912bd456a01f71edab0872f4e0f9e4285571941e4090"}, + {file = "aiohttp-3.11.13-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a86dc177eb4c286c19d1823ac296299f59ed8106c9536d2b559f65836e0fb2c6"}, + {file = "aiohttp-3.11.13-cp39-cp39-win32.whl", hash = "sha256:684eea71ab6e8ade86b9021bb62af4bf0881f6be4e926b6b5455de74e420783a"}, + {file = "aiohttp-3.11.13-cp39-cp39-win_amd64.whl", hash = "sha256:82c249f2bfa5ecbe4a1a7902c81c0fba52ed9ebd0176ab3047395d02ad96cfcb"}, + {file = "aiohttp-3.11.13.tar.gz", hash = "sha256:8ce789231404ca8fff7f693cdce398abf6d90fd5dae2b1847477196c243b1fbb"}, +] + +[package.dependencies] +aiodns = {version = ">=3.2.0", optional = true, markers = "(sys_platform == \"linux\" or 
sys_platform == \"darwin\") and extra == \"speedups\""} +aiohappyeyeballs = ">=2.3.0" +aiosignal = ">=1.1.2" +async-timeout = {version = ">=4.0,<6.0", markers = "python_version < \"3.11\""} +attrs = ">=17.3.0" +Brotli = {version = "*", optional = true, markers = "platform_python_implementation == \"CPython\" and extra == \"speedups\""} +brotlicffi = {version = "*", optional = true, markers = "platform_python_implementation != \"CPython\" and extra == \"speedups\""} +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +propcache = ">=0.2.0" +yarl = ">=1.17.0,<2.0" + +[package.extras] +speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (>=3.2.0) ; sys_platform == \"linux\" or sys_platform == \"darwin\"", "brotlicffi ; platform_python_implementation != \"CPython\""] + +[[package]] +name = "aiosignal" +version = "1.3.2" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5"}, + {file = "aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "async-timeout" +version = "5.0.1" +description = "Timeout context manager for asyncio programs" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.11\"" +files = [ + {file = "async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c"}, + {file = "async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3"}, +] + +[[package]] +name = "attrs" +version = "25.1.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = 
"attrs-25.1.0-py3-none-any.whl", hash = "sha256:c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a"}, + {file = "attrs-25.1.0.tar.gz", hash = "sha256:1c97078a80c814273a76b2a298a932eb681c87415c11dee0a6921de7f1b02c3e"}, +] + +[package.extras] +benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and 
python_version >= \"3.10\""] + +[[package]] +name = "black" +version = "25.1.0" +description = "The uncompromising code formatter." +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "black-25.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:759e7ec1e050a15f89b770cefbf91ebee8917aac5c20483bc2d80a6c3a04df32"}, + {file = "black-25.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e519ecf93120f34243e6b0054db49c00a35f84f195d5bce7e9f5cfc578fc2da"}, + {file = "black-25.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:055e59b198df7ac0b7efca5ad7ff2516bca343276c466be72eb04a3bcc1f82d7"}, + {file = "black-25.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:db8ea9917d6f8fc62abd90d944920d95e73c83a5ee3383493e35d271aca872e9"}, + {file = "black-25.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a39337598244de4bae26475f77dda852ea00a93bd4c728e09eacd827ec929df0"}, + {file = "black-25.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:96c1c7cd856bba8e20094e36e0f948718dc688dba4a9d78c3adde52b9e6c2299"}, + {file = "black-25.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bce2e264d59c91e52d8000d507eb20a9aca4a778731a08cfff7e5ac4a4bb7096"}, + {file = "black-25.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:172b1dbff09f86ce6f4eb8edf9dede08b1fce58ba194c87d7a4f1a5aa2f5b3c2"}, + {file = "black-25.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4b60580e829091e6f9238c848ea6750efed72140b91b048770b64e74fe04908b"}, + {file = "black-25.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e2978f6df243b155ef5fa7e558a43037c3079093ed5d10fd84c43900f2d8ecc"}, + {file = "black-25.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b48735872ec535027d979e8dcb20bf4f70b5ac75a8ea99f127c106a7d7aba9f"}, + {file = "black-25.1.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:ea0213189960bda9cf99be5b8c8ce66bb054af5e9e861249cd23471bd7b0b3ba"}, + {file = "black-25.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8f0b18a02996a836cc9c9c78e5babec10930862827b1b724ddfe98ccf2f2fe4f"}, + {file = "black-25.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:afebb7098bfbc70037a053b91ae8437c3857482d3a690fefc03e9ff7aa9a5fd3"}, + {file = "black-25.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:030b9759066a4ee5e5aca28c3c77f9c64789cdd4de8ac1df642c40b708be6171"}, + {file = "black-25.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:a22f402b410566e2d1c950708c77ebf5ebd5d0d88a6a2e87c86d9fb48afa0d18"}, + {file = "black-25.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a1ee0a0c330f7b5130ce0caed9936a904793576ef4d2b98c40835d6a65afa6a0"}, + {file = "black-25.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3df5f1bf91d36002b0a75389ca8663510cf0531cca8aa5c1ef695b46d98655f"}, + {file = "black-25.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d9e6827d563a2c820772b32ce8a42828dc6790f095f441beef18f96aa6f8294e"}, + {file = "black-25.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:bacabb307dca5ebaf9c118d2d2f6903da0d62c9faa82bd21a33eecc319559355"}, + {file = "black-25.1.0-py3-none-any.whl", hash = "sha256:95e8176dae143ba9097f351d174fdaf0ccd29efb414b362ae3fd72bf0f710717"}, + {file = "black-25.1.0.tar.gz", hash = "sha256:33496d5cd1222ad73391352b4ae8da15253c5de89b93a80b3e2c8d9a19ec2666"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.10)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop 
(>=0.15.2)"] + +[[package]] +name = "brotli" +version = "1.1.0" +description = "Python bindings for the Brotli compression library" +optional = false +python-versions = "*" +groups = ["main"] +markers = "platform_python_implementation == \"CPython\"" +files = [ + {file = "Brotli-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e1140c64812cb9b06c922e77f1c26a75ec5e3f0fb2bf92cc8c58720dec276752"}, + {file = "Brotli-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c8fd5270e906eef71d4a8d19b7c6a43760c6abcfcc10c9101d14eb2357418de9"}, + {file = "Brotli-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ae56aca0402a0f9a3431cddda62ad71666ca9d4dc3a10a142b9dce2e3c0cda3"}, + {file = "Brotli-1.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:43ce1b9935bfa1ede40028054d7f48b5469cd02733a365eec8a329ffd342915d"}, + {file = "Brotli-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:7c4855522edb2e6ae7fdb58e07c3ba9111e7621a8956f481c68d5d979c93032e"}, + {file = "Brotli-1.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:38025d9f30cf4634f8309c6874ef871b841eb3c347e90b0851f63d1ded5212da"}, + {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e6a904cb26bfefc2f0a6f240bdf5233be78cd2488900a2f846f3c3ac8489ab80"}, + {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a37b8f0391212d29b3a91a799c8e4a2855e0576911cdfb2515487e30e322253d"}, + {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e84799f09591700a4154154cab9787452925578841a94321d5ee8fb9a9a328f0"}, + {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f66b5337fa213f1da0d9000bc8dc0cb5b896b726eefd9c6046f699b169c41b9e"}, + {file = "Brotli-1.1.0-cp310-cp310-win32.whl", hash = 
"sha256:be36e3d172dc816333f33520154d708a2657ea63762ec16b62ece02ab5e4daf2"}, + {file = "Brotli-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:0c6244521dda65ea562d5a69b9a26120769b7a9fb3db2fe9545935ed6735b128"}, + {file = "Brotli-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a3daabb76a78f829cafc365531c972016e4aa8d5b4bf60660ad8ecee19df7ccc"}, + {file = "Brotli-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c8146669223164fc87a7e3de9f81e9423c67a79d6b3447994dfb9c95da16e2d6"}, + {file = "Brotli-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30924eb4c57903d5a7526b08ef4a584acc22ab1ffa085faceb521521d2de32dd"}, + {file = "Brotli-1.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ceb64bbc6eac5a140ca649003756940f8d6a7c444a68af170b3187623b43bebf"}, + {file = "Brotli-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a469274ad18dc0e4d316eefa616d1d0c2ff9da369af19fa6f3daa4f09671fd61"}, + {file = "Brotli-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:524f35912131cc2cabb00edfd8d573b07f2d9f21fa824bd3fb19725a9cf06327"}, + {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:5b3cc074004d968722f51e550b41a27be656ec48f8afaeeb45ebf65b561481dd"}, + {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:19c116e796420b0cee3da1ccec3b764ed2952ccfcc298b55a10e5610ad7885f9"}, + {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:510b5b1bfbe20e1a7b3baf5fed9e9451873559a976c1a78eebaa3b86c57b4265"}, + {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a1fd8a29719ccce974d523580987b7f8229aeace506952fa9ce1d53a033873c8"}, + {file = "Brotli-1.1.0-cp311-cp311-win32.whl", hash = "sha256:39da8adedf6942d76dc3e46653e52df937a3c4d6d18fdc94a7c29d263b1f5b50"}, + {file = "Brotli-1.1.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:aac0411d20e345dc0920bdec5548e438e999ff68d77564d5e9463a7ca9d3e7b1"}, + {file = "Brotli-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:316cc9b17edf613ac76b1f1f305d2a748f1b976b033b049a6ecdfd5612c70409"}, + {file = "Brotli-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:caf9ee9a5775f3111642d33b86237b05808dafcd6268faa492250e9b78046eb2"}, + {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70051525001750221daa10907c77830bc889cb6d865cc0b813d9db7fefc21451"}, + {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7f4bf76817c14aa98cc6697ac02f3972cb8c3da93e9ef16b9c66573a68014f91"}, + {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0c5516f0aed654134a2fc936325cc2e642f8a0e096d075209672eb321cff408"}, + {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c3020404e0b5eefd7c9485ccf8393cfb75ec38ce75586e046573c9dc29967a0"}, + {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4ed11165dd45ce798d99a136808a794a748d5dc38511303239d4e2363c0695dc"}, + {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4093c631e96fdd49e0377a9c167bfd75b6d0bad2ace734c6eb20b348bc3ea180"}, + {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e4c4629ddad63006efa0ef968c8e4751c5868ff0b1c5c40f76524e894c50248"}, + {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:861bf317735688269936f755fa136a99d1ed526883859f86e41a5d43c61d8966"}, + {file = "Brotli-1.1.0-cp312-cp312-win32.whl", hash = "sha256:5f4d5ea15c9382135076d2fb28dde923352fe02951e66935a9efaac8f10e81b0"}, + {file = "Brotli-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:906bc3a79de8c4ae5b86d3d75a8b77e44404b0f4261714306e3ad248d8ab0951"}, + {file = "Brotli-1.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:a090ca607cbb6a34b0391776f0cb48062081f5f60ddcce5d11838e67a01928d1"}, + {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de9d02f5bda03d27ede52e8cfe7b865b066fa49258cbab568720aa5be80a47d"}, + {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2333e30a5e00fe0fe55903c8832e08ee9c3b1382aacf4db26664a16528d51b4b"}, + {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4d4a848d1837973bf0f4b5e54e3bec977d99be36a7895c61abb659301b02c112"}, + {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fdc3ff3bfccdc6b9cc7c342c03aa2400683f0cb891d46e94b64a197910dc4064"}, + {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:5eeb539606f18a0b232d4ba45adccde4125592f3f636a6182b4a8a436548b914"}, + {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:fd5f17ff8f14003595ab414e45fce13d073e0762394f957182e69035c9f3d7c2"}, + {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:069a121ac97412d1fe506da790b3e69f52254b9df4eb665cd42460c837193354"}, + {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:e93dfc1a1165e385cc8239fab7c036fb2cd8093728cbd85097b284d7b99249a2"}, + {file = "Brotli-1.1.0-cp36-cp36m-win32.whl", hash = "sha256:a599669fd7c47233438a56936988a2478685e74854088ef5293802123b5b2460"}, + {file = "Brotli-1.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:d143fd47fad1db3d7c27a1b1d66162e855b5d50a89666af46e1679c496e8e579"}, + {file = "Brotli-1.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:11d00ed0a83fa22d29bc6b64ef636c4552ebafcef57154b4ddd132f5638fbd1c"}, + {file = "Brotli-1.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f733d788519c7e3e71f0855c96618720f5d3d60c3cb829d8bbb722dddce37985"}, + {file = "Brotli-1.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:929811df5462e182b13920da56c6e0284af407d1de637d8e536c5cd00a7daf60"}, + {file = "Brotli-1.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0b63b949ff929fbc2d6d3ce0e924c9b93c9785d877a21a1b678877ffbbc4423a"}, + {file = "Brotli-1.1.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d192f0f30804e55db0d0e0a35d83a9fead0e9a359a9ed0285dbacea60cc10a84"}, + {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f296c40e23065d0d6650c4aefe7470d2a25fffda489bcc3eb66083f3ac9f6643"}, + {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:919e32f147ae93a09fe064d77d5ebf4e35502a8df75c29fb05788528e330fe74"}, + {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:23032ae55523cc7bccb4f6a0bf368cd25ad9bcdcc1990b64a647e7bbcce9cb5b"}, + {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:224e57f6eac61cc449f498cc5f0e1725ba2071a3d4f48d5d9dffba42db196438"}, + {file = "Brotli-1.1.0-cp37-cp37m-win32.whl", hash = "sha256:587ca6d3cef6e4e868102672d3bd9dc9698c309ba56d41c2b9c85bbb903cdb95"}, + {file = "Brotli-1.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2954c1c23f81c2eaf0b0717d9380bd348578a94161a65b3a2afc62c86467dd68"}, + {file = "Brotli-1.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:efa8b278894b14d6da122a72fefcebc28445f2d3f880ac59d46c90f4c13be9a3"}, + {file = "Brotli-1.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:03d20af184290887bdea3f0f78c4f737d126c74dc2f3ccadf07e54ceca3bf208"}, + {file = "Brotli-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6172447e1b368dcbc458925e5ddaf9113477b0ed542df258d84fa28fc45ceea7"}, + {file = "Brotli-1.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a743e5a28af5f70f9c080380a5f908d4d21d40e8f0e0c8901604d15cfa9ba751"}, + {file = "Brotli-1.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:0541e747cce78e24ea12d69176f6a7ddb690e62c425e01d31cc065e69ce55b48"}, + {file = "Brotli-1.1.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cdbc1fc1bc0bff1cef838eafe581b55bfbffaed4ed0318b724d0b71d4d377619"}, + {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:890b5a14ce214389b2cc36ce82f3093f96f4cc730c1cffdbefff77a7c71f2a97"}, + {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ab4fbee0b2d9098c74f3057b2bc055a8bd92ccf02f65944a241b4349229185a"}, + {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:141bd4d93984070e097521ed07e2575b46f817d08f9fa42b16b9b5f27b5ac088"}, + {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fce1473f3ccc4187f75b4690cfc922628aed4d3dd013d047f95a9b3919a86596"}, + {file = "Brotli-1.1.0-cp38-cp38-win32.whl", hash = "sha256:db85ecf4e609a48f4b29055f1e144231b90edc90af7481aa731ba2d059226b1b"}, + {file = "Brotli-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3d7954194c36e304e1523f55d7042c59dc53ec20dd4e9ea9d151f1b62b4415c0"}, + {file = "Brotli-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5fb2ce4b8045c78ebbc7b8f3c15062e435d47e7393cc57c25115cfd49883747a"}, + {file = "Brotli-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7905193081db9bfa73b1219140b3d315831cbff0d8941f22da695832f0dd188f"}, + {file = "Brotli-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a77def80806c421b4b0af06f45d65a136e7ac0bdca3c09d9e2ea4e515367c7e9"}, + {file = "Brotli-1.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8dadd1314583ec0bf2d1379f7008ad627cd6336625d6679cf2f8e67081b83acf"}, + {file = "Brotli-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:901032ff242d479a0efa956d853d16875d42157f98951c0230f69e69f9c09bac"}, + {file = "Brotli-1.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = 
"sha256:22fc2a8549ffe699bfba2256ab2ed0421a7b8fadff114a3d201794e45a9ff578"}, + {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ae15b066e5ad21366600ebec29a7ccbc86812ed267e4b28e860b8ca16a2bc474"}, + {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:949f3b7c29912693cee0afcf09acd6ebc04c57af949d9bf77d6101ebb61e388c"}, + {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:89f4988c7203739d48c6f806f1e87a1d96e0806d44f0fba61dba81392c9e474d"}, + {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:de6551e370ef19f8de1807d0a9aa2cdfdce2e85ce88b122fe9f6b2b076837e59"}, + {file = "Brotli-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f0d8a7a6b5983c2496e364b969f0e526647a06b075d034f3297dc66f3b360c64"}, + {file = "Brotli-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cdad5b9014d83ca68c25d2e9444e28e967ef16e80f6b436918c700c117a85467"}, + {file = "Brotli-1.1.0.tar.gz", hash = "sha256:81de08ac11bcb85841e440c13611c00b67d3bf82698314928d0b676362546724"}, +] + +[[package]] +name = "brotlicffi" +version = "1.1.0.0" +description = "Python CFFI bindings to the Brotli library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "platform_python_implementation != \"CPython\"" +files = [ + {file = "brotlicffi-1.1.0.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9b7ae6bd1a3f0df532b6d67ff674099a96d22bc0948955cb338488c31bfb8851"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19ffc919fa4fc6ace69286e0a23b3789b4219058313cf9b45625016bf7ff996b"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9feb210d932ffe7798ee62e6145d3a757eb6233aa9a4e7db78dd3690d7755814"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84763dbdef5dd5c24b75597a77e1b30c66604725707565188ba54bab4f114820"}, + {file = 
"brotlicffi-1.1.0.0-cp37-abi3-win32.whl", hash = "sha256:1b12b50e07c3911e1efa3a8971543e7648100713d4e0971b13631cce22c587eb"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:994a4f0681bb6c6c3b0925530a1926b7a189d878e6e5e38fae8efa47c5d9c613"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2e4aeb0bd2540cb91b069dbdd54d458da8c4334ceaf2d25df2f4af576d6766ca"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b7b0033b0d37bb33009fb2fef73310e432e76f688af76c156b3594389d81391"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54a07bb2374a1eba8ebb52b6fafffa2afd3c4df85ddd38fcc0511f2bb387c2a8"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7901a7dc4b88f1c1475de59ae9be59799db1007b7d059817948d8e4f12e24e35"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce01c7316aebc7fce59da734286148b1d1b9455f89cf2c8a4dfce7d41db55c2d"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:246f1d1a90279bb6069de3de8d75a8856e073b8ff0b09dcca18ccc14cec85979"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc4bc5d82bc56ebd8b514fb8350cfac4627d6b0743382e46d033976a5f80fab6"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37c26ecb14386a44b118ce36e546ce307f4810bc9598a6e6cb4f7fca725ae7e6"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca72968ae4eaf6470498d5c2887073f7efe3b1e7d7ec8be11a06a79cc810e990"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-win_amd64.whl", hash = 
"sha256:add0de5b9ad9e9aa293c3aa4e9deb2b61e99ad6c1634e01d01d98c03e6a354cc"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9b6068e0f3769992d6b622a1cd2e7835eae3cf8d9da123d7f51ca9c1e9c333e5"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8557a8559509b61e65083f8782329188a250102372576093c88930c875a69838"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a7ae37e5d79c5bdfb5b4b99f2715a6035e6c5bf538c3746abc8e26694f92f33"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:391151ec86bb1c683835980f4816272a87eaddc46bb91cbf44f62228b84d8cca"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:2f3711be9290f0453de8eed5275d93d286abe26b08ab4a35d7452caa1fef532f"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1a807d760763e398bbf2c6394ae9da5815901aa93ee0a37bca5efe78d4ee3171"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa8ca0623b26c94fccc3a1fdd895be1743b838f3917300506d04aa3346fd2a14"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3de0cf28a53a3238b252aca9fed1593e9d36c1d116748013339f0949bfc84112"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6be5ec0e88a4925c91f3dea2bb0013b3a2accda6f77238f76a34a1ea532a1cb0"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d9eb71bb1085d996244439154387266fd23d6ad37161f6f52f1cd41dd95a3808"}, + {file = "brotlicffi-1.1.0.0.tar.gz", hash = "sha256:b77827a689905143f87915310b93b273ab17888fd43ef350d4832c4a71083c13"}, +] + +[package.dependencies] +cffi = ">=1.0.0" + 
+[[package]] +name = "certifi" +version = "2025.1.31" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +groups = ["main", "dev"] +files = [ + {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, + {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, +] + +[[package]] +name = "cffi" +version = "1.17.1" +description = "Foreign Function Interface for Python calling C code." +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "sys_platform == \"linux\" or sys_platform == \"darwin\" or platform_python_implementation != \"CPython\"" +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = 
"sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = 
"sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.4.1" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +files = [ + {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"}, + {file = 
"charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = 
"sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"}, + {file = 
"charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", 
hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash 
= "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"}, + {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"}, + {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, +] + +[[package]] +name = "click" +version = "8.1.8" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, + {file = "click-8.1.8.tar.gz", hash = 
"sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "dev"] +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] +markers = {main = "(extra == \"all\" or extra == \"downloads\") and platform_system == \"Windows\"", dev = "platform_system == \"Windows\" or sys_platform == \"win32\""} + +[[package]] +name = "coverage" +version = "7.6.12" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "coverage-7.6.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:704c8c8c6ce6569286ae9622e534b4f5b9759b6f2cd643f1c1a61f666d534fe8"}, + {file = "coverage-7.6.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ad7525bf0241e5502168ae9c643a2f6c219fa0a283001cee4cf23a9b7da75879"}, + {file = "coverage-7.6.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06097c7abfa611c91edb9e6920264e5be1d6ceb374efb4986f38b09eed4cb2fe"}, + {file = "coverage-7.6.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:220fa6c0ad7d9caef57f2c8771918324563ef0d8272c94974717c3909664e674"}, + {file = "coverage-7.6.12-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3688b99604a24492bcfe1c106278c45586eb819bf66a654d8a9a1433022fb2eb"}, + {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:d1a987778b9c71da2fc8948e6f2656da6ef68f59298b7e9786849634c35d2c3c"}, + {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:cec6b9ce3bd2b7853d4a4563801292bfee40b030c05a3d29555fd2a8ee9bd68c"}, + {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ace9048de91293e467b44bce0f0381345078389814ff6e18dbac8fdbf896360e"}, + {file = "coverage-7.6.12-cp310-cp310-win32.whl", hash = "sha256:ea31689f05043d520113e0552f039603c4dd71fa4c287b64cb3606140c66f425"}, + {file = "coverage-7.6.12-cp310-cp310-win_amd64.whl", hash = "sha256:676f92141e3c5492d2a1596d52287d0d963df21bf5e55c8b03075a60e1ddf8aa"}, + {file = "coverage-7.6.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e18aafdfb3e9ec0d261c942d35bd7c28d031c5855dadb491d2723ba54f4c3015"}, + {file = "coverage-7.6.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:66fe626fd7aa5982cdebad23e49e78ef7dbb3e3c2a5960a2b53632f1f703ea45"}, + {file = "coverage-7.6.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ef01d70198431719af0b1f5dcbefc557d44a190e749004042927b2a3fed0702"}, + {file = "coverage-7.6.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e92ae5a289a4bc4c0aae710c0948d3c7892e20fd3588224ebe242039573bf0"}, + {file = "coverage-7.6.12-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e695df2c58ce526eeab11a2e915448d3eb76f75dffe338ea613c1201b33bab2f"}, + {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d74c08e9aaef995f8c4ef6d202dbd219c318450fe2a76da624f2ebb9c8ec5d9f"}, + {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e995b3b76ccedc27fe4f477b349b7d64597e53a43fc2961db9d3fbace085d69d"}, + {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b1f097878d74fe51e1ddd1be62d8e3682748875b461232cf4b52ddc6e6db0bba"}, + {file = 
"coverage-7.6.12-cp311-cp311-win32.whl", hash = "sha256:1f7ffa05da41754e20512202c866d0ebfc440bba3b0ed15133070e20bf5aeb5f"}, + {file = "coverage-7.6.12-cp311-cp311-win_amd64.whl", hash = "sha256:e216c5c45f89ef8971373fd1c5d8d1164b81f7f5f06bbf23c37e7908d19e8558"}, + {file = "coverage-7.6.12-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b172f8e030e8ef247b3104902cc671e20df80163b60a203653150d2fc204d1ad"}, + {file = "coverage-7.6.12-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:641dfe0ab73deb7069fb972d4d9725bf11c239c309ce694dd50b1473c0f641c3"}, + {file = "coverage-7.6.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e549f54ac5f301e8e04c569dfdb907f7be71b06b88b5063ce9d6953d2d58574"}, + {file = "coverage-7.6.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:959244a17184515f8c52dcb65fb662808767c0bd233c1d8a166e7cf74c9ea985"}, + {file = "coverage-7.6.12-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bda1c5f347550c359f841d6614fb8ca42ae5cb0b74d39f8a1e204815ebe25750"}, + {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1ceeb90c3eda1f2d8c4c578c14167dbd8c674ecd7d38e45647543f19839dd6ea"}, + {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f16f44025c06792e0fb09571ae454bcc7a3ec75eeb3c36b025eccf501b1a4c3"}, + {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b076e625396e787448d27a411aefff867db2bffac8ed04e8f7056b07024eed5a"}, + {file = "coverage-7.6.12-cp312-cp312-win32.whl", hash = "sha256:00b2086892cf06c7c2d74983c9595dc511acca00665480b3ddff749ec4fb2a95"}, + {file = "coverage-7.6.12-cp312-cp312-win_amd64.whl", hash = "sha256:7ae6eabf519bc7871ce117fb18bf14e0e343eeb96c377667e3e5dd12095e0288"}, + {file = "coverage-7.6.12-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:488c27b3db0ebee97a830e6b5a3ea930c4a6e2c07f27a5e67e1b3532e76b9ef1"}, + {file = "coverage-7.6.12-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d1095bbee1851269f79fd8e0c9b5544e4c00c0c24965e66d8cba2eb5bb535fd"}, + {file = "coverage-7.6.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0533adc29adf6a69c1baa88c3d7dbcaadcffa21afbed3ca7a225a440e4744bf9"}, + {file = "coverage-7.6.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53c56358d470fa507a2b6e67a68fd002364d23c83741dbc4c2e0680d80ca227e"}, + {file = "coverage-7.6.12-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64cbb1a3027c79ca6310bf101014614f6e6e18c226474606cf725238cf5bc2d4"}, + {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:79cac3390bfa9836bb795be377395f28410811c9066bc4eefd8015258a7578c6"}, + {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9b148068e881faa26d878ff63e79650e208e95cf1c22bd3f77c3ca7b1d9821a3"}, + {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8bec2ac5da793c2685ce5319ca9bcf4eee683b8a1679051f8e6ec04c4f2fd7dc"}, + {file = "coverage-7.6.12-cp313-cp313-win32.whl", hash = "sha256:200e10beb6ddd7c3ded322a4186313d5ca9e63e33d8fab4faa67ef46d3460af3"}, + {file = "coverage-7.6.12-cp313-cp313-win_amd64.whl", hash = "sha256:2b996819ced9f7dbb812c701485d58f261bef08f9b85304d41219b1496b591ef"}, + {file = "coverage-7.6.12-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:299cf973a7abff87a30609879c10df0b3bfc33d021e1adabc29138a48888841e"}, + {file = "coverage-7.6.12-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4b467a8c56974bf06e543e69ad803c6865249d7a5ccf6980457ed2bc50312703"}, + {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2458f275944db8129f95d91aee32c828a408481ecde3b30af31d552c2ce284a0"}, + 
{file = "coverage-7.6.12-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a9d8be07fb0832636a0f72b80d2a652fe665e80e720301fb22b191c3434d924"}, + {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d47376a4f445e9743f6c83291e60adb1b127607a3618e3185bbc8091f0467b"}, + {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b95574d06aa9d2bd6e5cc35a5bbe35696342c96760b69dc4287dbd5abd4ad51d"}, + {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:ecea0c38c9079570163d663c0433a9af4094a60aafdca491c6a3d248c7432827"}, + {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2251fabcfee0a55a8578a9d29cecfee5f2de02f11530e7d5c5a05859aa85aee9"}, + {file = "coverage-7.6.12-cp313-cp313t-win32.whl", hash = "sha256:eb5507795caabd9b2ae3f1adc95f67b1104971c22c624bb354232d65c4fc90b3"}, + {file = "coverage-7.6.12-cp313-cp313t-win_amd64.whl", hash = "sha256:f60a297c3987c6c02ffb29effc70eadcbb412fe76947d394a1091a3615948e2f"}, + {file = "coverage-7.6.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e7575ab65ca8399c8c4f9a7d61bbd2d204c8b8e447aab9d355682205c9dd948d"}, + {file = "coverage-7.6.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8161d9fbc7e9fe2326de89cd0abb9f3599bccc1287db0aba285cb68d204ce929"}, + {file = "coverage-7.6.12-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a1e465f398c713f1b212400b4e79a09829cd42aebd360362cd89c5bdc44eb87"}, + {file = "coverage-7.6.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f25d8b92a4e31ff1bd873654ec367ae811b3a943583e05432ea29264782dc32c"}, + {file = "coverage-7.6.12-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1a936309a65cc5ca80fa9f20a442ff9e2d06927ec9a4f54bcba9c14c066323f2"}, + {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:aa6f302a3a0b5f240ee201297fff0bbfe2fa0d415a94aeb257d8b461032389bd"}, + {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f973643ef532d4f9be71dd88cf7588936685fdb576d93a79fe9f65bc337d9d73"}, + {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:78f5243bb6b1060aed6213d5107744c19f9571ec76d54c99cc15938eb69e0e86"}, + {file = "coverage-7.6.12-cp39-cp39-win32.whl", hash = "sha256:69e62c5034291c845fc4df7f8155e8544178b6c774f97a99e2734b05eb5bed31"}, + {file = "coverage-7.6.12-cp39-cp39-win_amd64.whl", hash = "sha256:b01a840ecc25dce235ae4c1b6a0daefb2a203dba0e6e980637ee9c2f6ee0df57"}, + {file = "coverage-7.6.12-pp39.pp310-none-any.whl", hash = "sha256:7e39e845c4d764208e7b8f6a21c541ade741e2c41afabdfa1caa28687a3c98cf"}, + {file = "coverage-7.6.12-py3-none-any.whl", hash = "sha256:eb8668cfbc279a536c633137deeb9435d2962caec279c3f8cf8b91fff6ff8953"}, + {file = "coverage-7.6.12.tar.gz", hash = "sha256:48cfc4641d95d34766ad41d9573cc0f22a48aa88d22657a1fe01dca0dbae4de2"}, +] + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] + +[[package]] +name = "deprecation" +version = "2.1.0" +description = "A library to handle automated deprecations" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a"}, + {file = "deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff"}, +] + +[package.dependencies] +packaging = "*" + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +description = "Backport of PEP 654 (exception groups)" +optional 
= false +python-versions = ">=3.7" +groups = ["dev"] +markers = "python_version < \"3.11\"" +files = [ + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "frozenlist" +version = "1.5.0" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:15538c0cbf0e4fa11d1e3a71f823524b0c46299aed6e10ebb4c2089abd8c3bec"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e79225373c317ff1e35f210dd5f1344ff31066ba8067c307ab60254cd3a78ad5"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9272fa73ca71266702c4c3e2d4a28553ea03418e591e377a03b8e3659d94fa76"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:498524025a5b8ba81695761d78c8dd7382ac0b052f34e66939c42df860b8ff17"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92b5278ed9d50fe610185ecd23c55d8b307d75ca18e94c0e7de328089ac5dcba"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7f3c8c1dacd037df16e85227bac13cca58c30da836c6f936ba1df0c05d046d8d"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f2ac49a9bedb996086057b75bf93538240538c6d9b38e57c82d51f75a73409d2"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e66cc454f97053b79c2ab09c17fbe3c825ea6b4de20baf1be28919460dd7877f"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:5a3ba5f9a0dfed20337d3e966dc359784c9f96503674c2faf015f7fe8e96798c"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6321899477db90bdeb9299ac3627a6a53c7399c8cd58d25da094007402b039ab"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:76e4753701248476e6286f2ef492af900ea67d9706a0155335a40ea21bf3b2f5"}, + {file = "frozenlist-1.5.0-cp310-cp310-win32.whl", hash = "sha256:977701c081c0241d0955c9586ffdd9ce44f7a7795df39b9151cd9a6fd0ce4cfb"}, + {file = "frozenlist-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:189f03b53e64144f90990d29a27ec4f7997d91ed3d01b51fa39d2dbe77540fd4"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fd74520371c3c4175142d02a976aee0b4cb4a7cc912a60586ffd8d5929979b30"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2f3f7a0fbc219fb4455264cae4d9f01ad41ae6ee8524500f381de64ffaa077d5"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f47c9c9028f55a04ac254346e92977bf0f166c483c74b4232bee19a6697e4778"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0996c66760924da6e88922756d99b47512a71cfd45215f3570bf1e0b694c206a"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2fe128eb4edeabe11896cb6af88fca5346059f6c8d807e3b910069f39157869"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:1a8ea951bbb6cacd492e3948b8da8c502a3f814f5d20935aae74b5df2b19cf3d"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de537c11e4aa01d37db0d403b57bd6f0546e71a82347a97c6a9f0dcc532b3a45"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c2623347b933fcb9095841f1cc5d4ff0b278addd743e0e966cb3d460278840d"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cee6798eaf8b1416ef6909b06f7dc04b60755206bddc599f52232606e18179d3"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f5f9da7f5dbc00a604fe74aa02ae7c98bcede8a3b8b9666f9f86fc13993bc71a"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:90646abbc7a5d5c7c19461d2e3eeb76eb0b204919e6ece342feb6032c9325ae9"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:bdac3c7d9b705d253b2ce370fde941836a5f8b3c5c2b8fd70940a3ea3af7f4f2"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03d33c2ddbc1816237a67f66336616416e2bbb6beb306e5f890f2eb22b959cdf"}, + {file = "frozenlist-1.5.0-cp311-cp311-win32.whl", hash = "sha256:237f6b23ee0f44066219dae14c70ae38a63f0440ce6750f868ee08775073f942"}, + {file = "frozenlist-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:0cc974cc93d32c42e7b0f6cf242a6bd941c57c61b618e78b6c0a96cb72788c1d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:31115ba75889723431aa9a4e77d5f398f5cf976eea3bdf61749731f62d4a4a21"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7437601c4d89d070eac8323f121fcf25f88674627505334654fd027b091db09d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7948140d9f8ece1745be806f2bfdf390127cf1a763b925c4a805c603df5e697e"}, + {file = 
"frozenlist-1.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feeb64bc9bcc6b45c6311c9e9b99406660a9c05ca8a5b30d14a78555088b0b3a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:683173d371daad49cffb8309779e886e59c2f369430ad28fe715f66d08d4ab1a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7d57d8f702221405a9d9b40f9da8ac2e4a1a8b5285aac6100f3393675f0a85ee"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c72000fbcc35b129cb09956836c7d7abf78ab5416595e4857d1cae8d6251a6"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:000a77d6034fbad9b6bb880f7ec073027908f1b40254b5d6f26210d2dab1240e"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5d7f5a50342475962eb18b740f3beecc685a15b52c91f7d975257e13e029eca9"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:87f724d055eb4785d9be84e9ebf0f24e392ddfad00b3fe036e43f489fafc9039"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:6e9080bb2fb195a046e5177f10d9d82b8a204c0736a97a153c2466127de87784"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b93d7aaa36c966fa42efcaf716e6b3900438632a626fb09c049f6a2f09fc631"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f"}, + {file = "frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8"}, + {file = "frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f"}, + {file = 
"frozenlist-1.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a1a048f9215c90973402e26c01d1cff8a209e1f1b53f72b95c13db61b00f953"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dd47a5181ce5fcb463b5d9e17ecfdb02b678cca31280639255ce9d0e5aa67af0"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1431d60b36d15cda188ea222033eec8e0eab488f39a272461f2e6d9e1a8e63c2"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6482a5851f5d72767fbd0e507e80737f9c8646ae7fd303def99bfe813f76cf7f"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44c49271a937625619e862baacbd037a7ef86dd1ee215afc298a417ff3270608"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12f78f98c2f1c2429d42e6a485f433722b0061d5c0b0139efa64f396efb5886b"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce3aa154c452d2467487765e3adc730a8c153af77ad84096bc19ce19a2400840"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b7dc0c4338e6b8b091e8faf0db3168a37101943e687f373dce00959583f7439"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45e0896250900b5aa25180f9aec243e84e92ac84bd4a74d9ad4138ef3f5c97de"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:561eb1c9579d495fddb6da8959fd2a1fca2c6d060d4113f5844b433fc02f2641"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:df6e2f325bfee1f49f81aaac97d2aa757c7646534a06f8f577ce184afe2f0a9e"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:140228863501b44b809fb39ec56b5d4071f4d0aa6d216c19cbb08b8c5a7eadb9"}, + {file = 
"frozenlist-1.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7707a25d6a77f5d27ea7dc7d1fc608aa0a478193823f88511ef5e6b8a48f9d03"}, + {file = "frozenlist-1.5.0-cp313-cp313-win32.whl", hash = "sha256:31a9ac2b38ab9b5a8933b693db4939764ad3f299fcaa931a3e605bc3460e693c"}, + {file = "frozenlist-1.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:11aabdd62b8b9c4b84081a3c246506d1cddd2dd93ff0ad53ede5defec7886b28"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:dd94994fc91a6177bfaafd7d9fd951bc8689b0a98168aa26b5f543868548d3ca"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d0da8bbec082bf6bf18345b180958775363588678f64998c2b7609e34719b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:73f2e31ea8dd7df61a359b731716018c2be196e5bb3b74ddba107f694fbd7604"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:828afae9f17e6de596825cf4228ff28fbdf6065974e5ac1410cecc22f699d2b3"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1577515d35ed5649d52ab4319db757bb881ce3b2b796d7283e6634d99ace307"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2150cc6305a2c2ab33299453e2968611dacb970d2283a14955923062c8d00b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a72b7a6e3cd2725eff67cd64c8f13335ee18fc3c7befc05aed043d24c7b9ccb9"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c16d2fa63e0800723139137d667e1056bee1a1cf7965153d2d104b62855e9b99"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:17dcc32fc7bda7ce5875435003220a457bcfa34ab7924a49a1c19f55b6ee185c"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_i686.whl", hash = 
"sha256:97160e245ea33d8609cd2b8fd997c850b56db147a304a262abc2b3be021a9171"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f1e6540b7fa044eee0bb5111ada694cf3dc15f2b0347ca125ee9ca984d5e9e6e"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:91d6c171862df0a6c61479d9724f22efb6109111017c87567cfeb7b5d1449fdf"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c1fac3e2ace2eb1052e9f7c7db480818371134410e1f5c55d65e8f3ac6d1407e"}, + {file = "frozenlist-1.5.0-cp38-cp38-win32.whl", hash = "sha256:b97f7b575ab4a8af9b7bc1d2ef7f29d3afee2226bd03ca3875c16451ad5a7723"}, + {file = "frozenlist-1.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:374ca2dabdccad8e2a76d40b1d037f5bd16824933bf7bcea3e59c891fd4a0923"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9bbcdfaf4af7ce002694a4e10a0159d5a8d20056a12b05b45cea944a4953f972"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1893f948bf6681733aaccf36c5232c231e3b5166d607c5fa77773611df6dc336"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2b5e23253bb709ef57a8e95e6ae48daa9ac5f265637529e4ce6b003a37b2621f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f253985bb515ecd89629db13cb58d702035ecd8cfbca7d7a7e29a0e6d39af5f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04a5c6babd5e8fb7d3c871dc8b321166b80e41b637c31a995ed844a6139942b6"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fe0f1c29ba24ba6ff6abf688cb0b7cf1efab6b6aa6adc55441773c252f7411"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:226d72559fa19babe2ccd920273e767c96a49b9d3d38badd7c91a0fdeda8ea08"}, + {file = 
"frozenlist-1.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15b731db116ab3aedec558573c1a5eec78822b32292fe4f2f0345b7f697745c2"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:366d8f93e3edfe5a918c874702f78faac300209a4d5bf38352b2c1bdc07a766d"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1b96af8c582b94d381a1c1f51ffaedeb77c821c690ea5f01da3d70a487dd0a9b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c03eff4a41bd4e38415cbed054bbaff4a075b093e2394b6915dca34a40d1e38b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:50cf5e7ee9b98f22bdecbabf3800ae78ddcc26e4a435515fc72d97903e8488e0"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e76bfbc72353269c44e0bc2cfe171900fbf7f722ad74c9a7b638052afe6a00c"}, + {file = "frozenlist-1.5.0-cp39-cp39-win32.whl", hash = "sha256:666534d15ba8f0fda3f53969117383d5dc021266b3c1a42c9ec4855e4b58b9d3"}, + {file = "frozenlist-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:5c28f4b5dbef8a0d8aad0d4de24d1e9e981728628afaf4ea0792f5d0939372f0"}, + {file = "frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3"}, + {file = "frozenlist-1.5.0.tar.gz", hash = "sha256:81d5af29e61b9c8348e876d442253723928dce6433e0e76cd925cd83f1b4b817"}, +] + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +groups = ["main", "dev"] +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff 
(>=0.6.2)"] + +[[package]] +name = "indico-client" +version = "6.14.0" +description = "A Python Wrapper for indico app API." +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "indico-client-6.14.0.tar.gz", hash = "sha256:48b8103e5e15d7c36a73fe1b0fca5f9ac047413968aceb8639c46304979fb6c6"}, +] + +[package.dependencies] +aiohttp = {version = "*", extras = ["speedups"]} +deprecation = ">=2.1.0" +jsons = "*" +requests = ">=2.22.0" +setuptools = ">=41.4.0" + +[package.extras] +all = ["msgpack (>=0.5.6)", "msgpack-numpy (==0.4.4.3)", "numpy (>=1.16.0)", "pandas (>=1.0.3)"] +datasets = ["pandas (>=1.0.3)"] +deserialization = ["msgpack (>=0.5.6)", "msgpack-numpy (==0.4.4.3)", "numpy (>=1.16.0)"] +exports = ["pandas (>=1.0.3)"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "jsons" +version = "1.6.3" +description = "For serializing Python objects to JSON (dicts) and back" +optional = false +python-versions = ">=3.5" +groups = ["main"] +files = [ + {file = "jsons-1.6.3-py3-none-any.whl", hash = "sha256:f07f8919316f72a3843c7ca6cc6c900513089f10092626934d1bfe4b5cf15401"}, + {file = "jsons-1.6.3.tar.gz", hash = "sha256:cd5815c7c6790ae11c70ad9978e0aa850d0d08a643a5105cc604eac8b29a30d7"}, +] + +[package.dependencies] +typish = ">=1.9.2" + +[package.extras] +test = ["attrs", "codecov", "coverage", "dataclasses ; python_version == \"3.6\"", "pytest", "scons", "tzdata ; python_version >= \"3.9\""] + +[[package]] +name = "multidict" +version = "6.1.0" +description = "multidict implementation" +optional = false +python-versions = ">=3.8" +groups = ["main"] 
+files = [ + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160"}, + {file = 
"multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7"}, + {file = "multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0"}, + {file = "multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = 
"sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753"}, + {file = "multidict-6.1.0-cp311-cp311-win32.whl", hash = "sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80"}, + {file = "multidict-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925"}, + {file = 
"multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3"}, + {file = "multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133"}, + {file = "multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c"}, 
+ {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6"}, + {file = "multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81"}, + {file = "multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:db7457bac39421addd0c8449933ac32d8042aae84a14911a757ae6ca3eef1392"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d094ddec350a2fb899fec68d8353c78233debde9b7d8b4beeafa70825f1c281a"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5845c1fd4866bb5dd3125d89b90e57ed3138241540897de748cdf19de8a2fca2"}, + 
{file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9079dfc6a70abe341f521f78405b8949f96db48da98aeb43f9907f342f627cdc"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3914f5aaa0f36d5d60e8ece6a308ee1c9784cd75ec8151062614657a114c4478"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c08be4f460903e5a9d0f76818db3250f12e9c344e79314d1d570fc69d7f4eae4"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d093be959277cb7dee84b801eb1af388b6ad3ca6a6b6bf1ed7585895789d027d"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3702ea6872c5a2a4eeefa6ffd36b042e9773f05b1f37ae3ef7264b1163c2dcf6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2090f6a85cafc5b2db085124d752757c9d251548cedabe9bd31afe6363e0aff2"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f67f217af4b1ff66c68a87318012de788dd95fcfeb24cc889011f4e1c7454dfd"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:189f652a87e876098bbc67b4da1049afb5f5dfbaa310dd67c594b01c10388db6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:6bb5992037f7a9eff7991ebe4273ea7f51f1c1c511e6a2ce511d0e7bdb754492"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f4c2b9e770c4e393876e35a7046879d195cd123b4f116d299d442b335bcd"}, + {file = "multidict-6.1.0-cp38-cp38-win32.whl", hash = "sha256:e27bbb6d14416713a8bd7aaa1313c0fc8d44ee48d74497a0ff4c3a1b6ccb5167"}, + {file = "multidict-6.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:22f3105d4fb15c8f57ff3959a58fcab6ce36814486500cd7485651230ad4d4ef"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43"}, + {file = 
"multidict-6.1.0-cp39-cp39-win32.whl", hash = "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada"}, + {file = "multidict-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a"}, + {file = "multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506"}, + {file = "multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "mypy" +version = "1.15.0" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13"}, + {file = "mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559"}, + {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b"}, + {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3"}, + {file = "mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b"}, + {file = "mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828"}, + {file = "mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f"}, + {file = "mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5"}, + {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e"}, + {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c"}, + {file = "mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f"}, + {file = "mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f"}, + {file = "mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd"}, + {file = "mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f"}, + {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464"}, + {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee"}, + {file = "mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e"}, + {file = "mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22"}, + {file = "mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445"}, + {file = "mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d"}, + {file = 
"mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5"}, + {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036"}, + {file = "mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357"}, + {file = "mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf"}, + {file = "mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078"}, + {file = "mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba"}, + {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5"}, + {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b"}, + {file = "mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2"}, + {file = "mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980"}, + {file = "mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e"}, + {file = "mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43"}, +] + +[package.dependencies] +mypy_extensions = ">=1.0.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing_extensions = ">=4.6.0" + 
+[package.extras] +dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." +optional = false +python-versions = ">=3.5" +groups = ["dev"] +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "numpy" +version = "2.2.3" +description = "Fundamental package for array computing in Python" +optional = true +python-versions = ">=3.10" +groups = ["main"] +markers = "extra == \"all\" or extra == \"downloads\" or extra == \"examples\" or extra == \"metrics\" or extra == \"predictions\" or extra == \"snapshots\"" +files = [ + {file = "numpy-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cbc6472e01952d3d1b2772b720428f8b90e2deea8344e854df22b0618e9cce71"}, + {file = "numpy-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cdfe0c22692a30cd830c0755746473ae66c4a8f2e7bd508b35fb3b6a0813d787"}, + {file = "numpy-2.2.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:e37242f5324ffd9f7ba5acf96d774f9276aa62a966c0bad8dae692deebec7716"}, + {file = "numpy-2.2.3-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:95172a21038c9b423e68be78fd0be6e1b97674cde269b76fe269a5dfa6fadf0b"}, + {file = "numpy-2.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5b47c440210c5d1d67e1cf434124e0b5c395eee1f5806fdd89b553ed1acd0a3"}, + {file = "numpy-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0391ea3622f5c51a2e29708877d56e3d276827ac5447d7f45e9bc4ade8923c52"}, + {file = "numpy-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:f6b3dfc7661f8842babd8ea07e9897fe3d9b69a1d7e5fbb743e4160f9387833b"}, + {file = "numpy-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1ad78ce7f18ce4e7df1b2ea4019b5817a2f6a8a16e34ff2775f646adce0a5027"}, + {file = "numpy-2.2.3-cp310-cp310-win32.whl", hash = "sha256:5ebeb7ef54a7be11044c33a17b2624abe4307a75893c001a4800857956b41094"}, + {file = "numpy-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:596140185c7fa113563c67c2e894eabe0daea18cf8e33851738c19f70ce86aeb"}, + {file = "numpy-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:16372619ee728ed67a2a606a614f56d3eabc5b86f8b615c79d01957062826ca8"}, + {file = "numpy-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5521a06a3148686d9269c53b09f7d399a5725c47bbb5b35747e1cb76326b714b"}, + {file = "numpy-2.2.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:7c8dde0ca2f77828815fd1aedfdf52e59071a5bae30dac3b4da2a335c672149a"}, + {file = "numpy-2.2.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:77974aba6c1bc26e3c205c2214f0d5b4305bdc719268b93e768ddb17e3fdd636"}, + {file = "numpy-2.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d42f9c36d06440e34226e8bd65ff065ca0963aeecada587b937011efa02cdc9d"}, + {file = "numpy-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2712c5179f40af9ddc8f6727f2bd910ea0eb50206daea75f58ddd9fa3f715bb"}, + {file = "numpy-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c8b0451d2ec95010d1db8ca733afc41f659f425b7f608af569711097fd6014e2"}, + {file = "numpy-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d9b4a8148c57ecac25a16b0e11798cbe88edf5237b0df99973687dd866f05e1b"}, + {file = "numpy-2.2.3-cp311-cp311-win32.whl", hash = "sha256:1f45315b2dc58d8a3e7754fe4e38b6fce132dab284a92851e41b2b344f6441c5"}, + {file = "numpy-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f48ba6f6c13e5e49f3d3efb1b51c8193215c42ac82610a04624906a9270be6f"}, + {file = 
"numpy-2.2.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:12c045f43b1d2915eca6b880a7f4a256f59d62df4f044788c8ba67709412128d"}, + {file = "numpy-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:87eed225fd415bbae787f93a457af7f5990b92a334e346f72070bf569b9c9c95"}, + {file = "numpy-2.2.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:712a64103d97c404e87d4d7c47fb0c7ff9acccc625ca2002848e0d53288b90ea"}, + {file = "numpy-2.2.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:a5ae282abe60a2db0fd407072aff4599c279bcd6e9a2475500fc35b00a57c532"}, + {file = "numpy-2.2.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5266de33d4c3420973cf9ae3b98b54a2a6d53a559310e3236c4b2b06b9c07d4e"}, + {file = "numpy-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b787adbf04b0db1967798dba8da1af07e387908ed1553a0d6e74c084d1ceafe"}, + {file = "numpy-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:34c1b7e83f94f3b564b35f480f5652a47007dd91f7c839f404d03279cc8dd021"}, + {file = "numpy-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4d8335b5f1b6e2bce120d55fb17064b0262ff29b459e8493d1785c18ae2553b8"}, + {file = "numpy-2.2.3-cp312-cp312-win32.whl", hash = "sha256:4d9828d25fb246bedd31e04c9e75714a4087211ac348cb39c8c5f99dbb6683fe"}, + {file = "numpy-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:83807d445817326b4bcdaaaf8e8e9f1753da04341eceec705c001ff342002e5d"}, + {file = "numpy-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7bfdb06b395385ea9b91bf55c1adf1b297c9fdb531552845ff1d3ea6e40d5aba"}, + {file = "numpy-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:23c9f4edbf4c065fddb10a4f6e8b6a244342d95966a48820c614891e5059bb50"}, + {file = "numpy-2.2.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:a0c03b6be48aaf92525cccf393265e02773be8fd9551a2f9adbe7db1fa2b60f1"}, + {file = "numpy-2.2.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = 
"sha256:2376e317111daa0a6739e50f7ee2a6353f768489102308b0d98fcf4a04f7f3b5"}, + {file = "numpy-2.2.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8fb62fe3d206d72fe1cfe31c4a1106ad2b136fcc1606093aeab314f02930fdf2"}, + {file = "numpy-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52659ad2534427dffcc36aac76bebdd02b67e3b7a619ac67543bc9bfe6b7cdb1"}, + {file = "numpy-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1b416af7d0ed3271cad0f0a0d0bee0911ed7eba23e66f8424d9f3dfcdcae1304"}, + {file = "numpy-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1402da8e0f435991983d0a9708b779f95a8c98c6b18a171b9f1be09005e64d9d"}, + {file = "numpy-2.2.3-cp313-cp313-win32.whl", hash = "sha256:136553f123ee2951bfcfbc264acd34a2fc2f29d7cdf610ce7daf672b6fbaa693"}, + {file = "numpy-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:5b732c8beef1d7bc2d9e476dbba20aaff6167bf205ad9aa8d30913859e82884b"}, + {file = "numpy-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:435e7a933b9fda8126130b046975a968cc2d833b505475e588339e09f7672890"}, + {file = "numpy-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7678556eeb0152cbd1522b684dcd215250885993dd00adb93679ec3c0e6e091c"}, + {file = "numpy-2.2.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:2e8da03bd561504d9b20e7a12340870dfc206c64ea59b4cfee9fceb95070ee94"}, + {file = "numpy-2.2.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:c9aa4496fd0e17e3843399f533d62857cef5900facf93e735ef65aa4bbc90ef0"}, + {file = "numpy-2.2.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4ca91d61a4bf61b0f2228f24bbfa6a9facd5f8af03759fe2a655c50ae2c6610"}, + {file = "numpy-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:deaa09cd492e24fd9b15296844c0ad1b3c976da7907e1c1ed3a0ad21dded6f76"}, + {file = "numpy-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = 
"sha256:246535e2f7496b7ac85deffe932896a3577be7af8fb7eebe7146444680297e9a"}, + {file = "numpy-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:daf43a3d1ea699402c5a850e5313680ac355b4adc9770cd5cfc2940e7861f1bf"}, + {file = "numpy-2.2.3-cp313-cp313t-win32.whl", hash = "sha256:cf802eef1f0134afb81fef94020351be4fe1d6681aadf9c5e862af6602af64ef"}, + {file = "numpy-2.2.3-cp313-cp313t-win_amd64.whl", hash = "sha256:aee2512827ceb6d7f517c8b85aa5d3923afe8fc7a57d028cffcd522f1c6fd082"}, + {file = "numpy-2.2.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3c2ec8a0f51d60f1e9c0c5ab116b7fc104b165ada3f6c58abf881cb2eb16044d"}, + {file = "numpy-2.2.3-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:ed2cf9ed4e8ebc3b754d398cba12f24359f018b416c380f577bbae112ca52fc9"}, + {file = "numpy-2.2.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39261798d208c3095ae4f7bc8eaeb3481ea8c6e03dc48028057d3cbdbdb8937e"}, + {file = "numpy-2.2.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:783145835458e60fa97afac25d511d00a1eca94d4a8f3ace9fe2043003c678e4"}, + {file = "numpy-2.2.3.tar.gz", hash = "sha256:dbdc15f0c81611925f382dfa97b3bd0bc2c1ce19d4fe50482cb0ddc12ba30020"}, +] + +[[package]] +name = "packaging" +version = "24.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, +] + +[[package]] +name = "pandas" +version = "2.2.3" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = true +python-versions = ">=3.9" +groups = ["main"] +markers = "extra == \"all\" or extra == \"downloads\" or extra == \"examples\" or extra == \"metrics\" or extra == \"predictions\" or 
extra == \"snapshots\"" +files = [ + {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, + {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, + {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"}, + {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"}, + {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"}, + {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"}, + {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"}, + {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.22.4", markers = "python_version < \"3.11\""}, + {version = ">=1.23.2", markers = "python_version == \"3.11\""}, + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, +] +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.7" + +[package.extras] +all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] +aws = ["s3fs (>=2022.11.0)"] +clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] +compression = ["zstandard (>=0.19.0)"] +computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] +consortium-standard = ["dataframe-api-compat (>=0.1.7)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb 
(>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] +feather = ["pyarrow (>=10.0.1)"] +fss = ["fsspec (>=2022.11.0)"] +gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] +hdf5 = ["tables (>=3.8.0)"] +html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] +mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] +parquet = ["pyarrow (>=10.0.1)"] +performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] +plot = ["matplotlib (>=3.6.3)"] +postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] +pyarrow = ["pyarrow (>=10.0.1)"] +spss = ["pyreadstat (>=1.2.0)"] +sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.9.2)"] + +[[package]] +name = "pastel" +version = "0.2.1" +description = "Bring colors to your terminal." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["dev"] +files = [ + {file = "pastel-0.2.1-py2.py3-none-any.whl", hash = "sha256:4349225fcdf6c2bb34d483e523475de5bb04a5c10ef711263452cb37d7dd4364"}, + {file = "pastel-0.2.1.tar.gz", hash = "sha256:e6581ac04e973cac858828c6202c1e1e81fee1dc7de7683f3e1ffe0bfd8a573d"}, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "platformdirs" +version = "4.3.6" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. 
a `user data dir`." +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] + +[[package]] +name = "plotly" +version = "5.24.1" +description = "An open-source, interactive data visualization library for Python" +optional = true +python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"all\" or extra == \"metrics\"" +files = [ + {file = "plotly-5.24.1-py3-none-any.whl", hash = "sha256:f67073a1e637eb0dc3e46324d9d51e2fe76e9727c892dde64ddf1e1b51f29089"}, + {file = "plotly-5.24.1.tar.gz", hash = "sha256:dbc8ac8339d248a4bcc36e08a5659bacfe1b079390b8953533f4eb22169b4bae"}, +] + +[package.dependencies] +packaging = "*" +tenacity = ">=6.2.0" + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "poethepoet" +version = "0.32.2" +description = "A task runner that works well with poetry." 
+optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "poethepoet-0.32.2-py3-none-any.whl", hash = "sha256:97e165de8e00b07d33fd8d72896fad8b20ccafcd327b1118bb6a3da26af38d33"}, + {file = "poethepoet-0.32.2.tar.gz", hash = "sha256:1d68871dac1b191e27bd68fea57d0e01e9afbba3fcd01dbe6f6bc3fcb071fe4c"}, +] + +[package.dependencies] +pastel = ">=0.2.1,<0.3.0" +pyyaml = ">=6.0.2,<7.0" +tomli = {version = ">=1.2.2", markers = "python_version < \"3.11\""} + +[package.extras] +poetry-plugin = ["poetry (>=1.2.0,<3.0.0) ; python_version < \"4.0\""] + +[[package]] +name = "propcache" +version = "0.3.0" +description = "Accelerated property cache" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "propcache-0.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:efa44f64c37cc30c9f05932c740a8b40ce359f51882c70883cc95feac842da4d"}, + {file = "propcache-0.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2383a17385d9800b6eb5855c2f05ee550f803878f344f58b6e194de08b96352c"}, + {file = "propcache-0.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d3e7420211f5a65a54675fd860ea04173cde60a7cc20ccfbafcccd155225f8bc"}, + {file = "propcache-0.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3302c5287e504d23bb0e64d2a921d1eb4a03fb93a0a0aa3b53de059f5a5d737d"}, + {file = "propcache-0.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7e2e068a83552ddf7a39a99488bcba05ac13454fb205c847674da0352602082f"}, + {file = "propcache-0.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d913d36bdaf368637b4f88d554fb9cb9d53d6920b9c5563846555938d5450bf"}, + {file = "propcache-0.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ee1983728964d6070ab443399c476de93d5d741f71e8f6e7880a065f878e0b9"}, + {file = "propcache-0.3.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:36ca5e9a21822cc1746023e88f5c0af6fce3af3b85d4520efb1ce4221bed75cc"}, + {file = "propcache-0.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9ecde3671e62eeb99e977f5221abcf40c208f69b5eb986b061ccec317c82ebd0"}, + {file = "propcache-0.3.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:d383bf5e045d7f9d239b38e6acadd7b7fdf6c0087259a84ae3475d18e9a2ae8b"}, + {file = "propcache-0.3.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:8cb625bcb5add899cb8ba7bf716ec1d3e8f7cdea9b0713fa99eadf73b6d4986f"}, + {file = "propcache-0.3.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:5fa159dcee5dba00c1def3231c249cf261185189205073bde13797e57dd7540a"}, + {file = "propcache-0.3.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:a7080b0159ce05f179cfac592cda1a82898ca9cd097dacf8ea20ae33474fbb25"}, + {file = "propcache-0.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ed7161bccab7696a473fe7ddb619c1d75963732b37da4618ba12e60899fefe4f"}, + {file = "propcache-0.3.0-cp310-cp310-win32.whl", hash = "sha256:bf0d9a171908f32d54f651648c7290397b8792f4303821c42a74e7805bfb813c"}, + {file = "propcache-0.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:42924dc0c9d73e49908e35bbdec87adedd651ea24c53c29cac103ede0ea1d340"}, + {file = "propcache-0.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9ddd49258610499aab83b4f5b61b32e11fce873586282a0e972e5ab3bcadee51"}, + {file = "propcache-0.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2578541776769b500bada3f8a4eeaf944530516b6e90c089aa368266ed70c49e"}, + {file = "propcache-0.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d8074c5dd61c8a3e915fa8fc04754fa55cfa5978200d2daa1e2d4294c1f136aa"}, + {file = "propcache-0.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b58229a844931bca61b3a20efd2be2a2acb4ad1622fc026504309a6883686fbf"}, + {file = "propcache-0.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:e45377d5d6fefe1677da2a2c07b024a6dac782088e37c0b1efea4cfe2b1be19b"}, + {file = "propcache-0.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ec5060592d83454e8063e487696ac3783cc48c9a329498bafae0d972bc7816c9"}, + {file = "propcache-0.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15010f29fbed80e711db272909a074dc79858c6d28e2915704cfc487a8ac89c6"}, + {file = "propcache-0.3.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a254537b9b696ede293bfdbc0a65200e8e4507bc9f37831e2a0318a9b333c85c"}, + {file = "propcache-0.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2b975528998de037dfbc10144b8aed9b8dd5a99ec547f14d1cb7c5665a43f075"}, + {file = "propcache-0.3.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:19d36bb351ad5554ff20f2ae75f88ce205b0748c38b146c75628577020351e3c"}, + {file = "propcache-0.3.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6032231d4a5abd67c7f71168fd64a47b6b451fbcb91c8397c2f7610e67683810"}, + {file = "propcache-0.3.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6985a593417cdbc94c7f9c3403747335e450c1599da1647a5af76539672464d3"}, + {file = "propcache-0.3.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:6a1948df1bb1d56b5e7b0553c0fa04fd0e320997ae99689488201f19fa90d2e7"}, + {file = "propcache-0.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8319293e85feadbbfe2150a5659dbc2ebc4afdeaf7d98936fb9a2f2ba0d4c35c"}, + {file = "propcache-0.3.0-cp311-cp311-win32.whl", hash = "sha256:63f26258a163c34542c24808f03d734b338da66ba91f410a703e505c8485791d"}, + {file = "propcache-0.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:cacea77ef7a2195f04f9279297684955e3d1ae4241092ff0cfcef532bb7a1c32"}, + {file = "propcache-0.3.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e53d19c2bf7d0d1e6998a7e693c7e87300dd971808e6618964621ccd0e01fe4e"}, + {file = "propcache-0.3.0-cp312-cp312-macosx_10_13_x86_64.whl", 
hash = "sha256:a61a68d630e812b67b5bf097ab84e2cd79b48c792857dc10ba8a223f5b06a2af"}, + {file = "propcache-0.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fb91d20fa2d3b13deea98a690534697742029f4fb83673a3501ae6e3746508b5"}, + {file = "propcache-0.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67054e47c01b7b349b94ed0840ccae075449503cf1fdd0a1fdd98ab5ddc2667b"}, + {file = "propcache-0.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:997e7b8f173a391987df40f3b52c423e5850be6f6df0dcfb5376365440b56667"}, + {file = "propcache-0.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d663fd71491dde7dfdfc899d13a067a94198e90695b4321084c6e450743b8c7"}, + {file = "propcache-0.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8884ba1a0fe7210b775106b25850f5e5a9dc3c840d1ae9924ee6ea2eb3acbfe7"}, + {file = "propcache-0.3.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa806bbc13eac1ab6291ed21ecd2dd426063ca5417dd507e6be58de20e58dfcf"}, + {file = "propcache-0.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6f4d7a7c0aff92e8354cceca6fe223973ddf08401047920df0fcb24be2bd5138"}, + {file = "propcache-0.3.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:9be90eebc9842a93ef8335291f57b3b7488ac24f70df96a6034a13cb58e6ff86"}, + {file = "propcache-0.3.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:bf15fc0b45914d9d1b706f7c9c4f66f2b7b053e9517e40123e137e8ca8958b3d"}, + {file = "propcache-0.3.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5a16167118677d94bb48bfcd91e420088854eb0737b76ec374b91498fb77a70e"}, + {file = "propcache-0.3.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:41de3da5458edd5678b0f6ff66691507f9885f5fe6a0fb99a5d10d10c0fd2d64"}, + {file = "propcache-0.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:728af36011bb5d344c4fe4af79cfe186729efb649d2f8b395d1572fb088a996c"}, + {file = "propcache-0.3.0-cp312-cp312-win32.whl", hash = "sha256:6b5b7fd6ee7b54e01759f2044f936dcf7dea6e7585f35490f7ca0420fe723c0d"}, + {file = "propcache-0.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:2d15bc27163cd4df433e75f546b9ac31c1ba7b0b128bfb1b90df19082466ff57"}, + {file = "propcache-0.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a2b9bf8c79b660d0ca1ad95e587818c30ccdb11f787657458d6f26a1ea18c568"}, + {file = "propcache-0.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b0c1a133d42c6fc1f5fbcf5c91331657a1ff822e87989bf4a6e2e39b818d0ee9"}, + {file = "propcache-0.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bb2f144c6d98bb5cbc94adeb0447cfd4c0f991341baa68eee3f3b0c9c0e83767"}, + {file = "propcache-0.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1323cd04d6e92150bcc79d0174ce347ed4b349d748b9358fd2e497b121e03c8"}, + {file = "propcache-0.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b812b3cb6caacd072276ac0492d249f210006c57726b6484a1e1805b3cfeea0"}, + {file = "propcache-0.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:742840d1d0438eb7ea4280f3347598f507a199a35a08294afdcc560c3739989d"}, + {file = "propcache-0.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c6e7e4f9167fddc438cd653d826f2222222564daed4116a02a184b464d3ef05"}, + {file = "propcache-0.3.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a94ffc66738da99232ddffcf7910e0f69e2bbe3a0802e54426dbf0714e1c2ffe"}, + {file = "propcache-0.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3c6ec957025bf32b15cbc6b67afe233c65b30005e4c55fe5768e4bb518d712f1"}, + {file = "propcache-0.3.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:549722908de62aa0b47a78b90531c022fa6e139f9166be634f667ff45632cc92"}, + {file = 
"propcache-0.3.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:5d62c4f6706bff5d8a52fd51fec6069bef69e7202ed481486c0bc3874912c787"}, + {file = "propcache-0.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:24c04f8fbf60094c531667b8207acbae54146661657a1b1be6d3ca7773b7a545"}, + {file = "propcache-0.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:7c5f5290799a3f6539cc5e6f474c3e5c5fbeba74a5e1e5be75587746a940d51e"}, + {file = "propcache-0.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4fa0e7c9c3cf7c276d4f6ab9af8adddc127d04e0fcabede315904d2ff76db626"}, + {file = "propcache-0.3.0-cp313-cp313-win32.whl", hash = "sha256:ee0bd3a7b2e184e88d25c9baa6a9dc609ba25b76daae942edfb14499ac7ec374"}, + {file = "propcache-0.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:1c8f7d896a16da9455f882870a507567d4f58c53504dc2d4b1e1d386dfe4588a"}, + {file = "propcache-0.3.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:e560fd75aaf3e5693b91bcaddd8b314f4d57e99aef8a6c6dc692f935cc1e6bbf"}, + {file = "propcache-0.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:65a37714b8ad9aba5780325228598a5b16c47ba0f8aeb3dc0514701e4413d7c0"}, + {file = "propcache-0.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:07700939b2cbd67bfb3b76a12e1412405d71019df00ca5697ce75e5ef789d829"}, + {file = "propcache-0.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c0fdbdf6983526e269e5a8d53b7ae3622dd6998468821d660d0daf72779aefa"}, + {file = "propcache-0.3.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:794c3dd744fad478b6232289c866c25406ecdfc47e294618bdf1697e69bd64a6"}, + {file = "propcache-0.3.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4544699674faf66fb6b4473a1518ae4999c1b614f0b8297b1cef96bac25381db"}, + {file = "propcache-0.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fddb8870bdb83456a489ab67c6b3040a8d5a55069aa6f72f9d872235fbc52f54"}, + 
{file = "propcache-0.3.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f857034dc68d5ceb30fb60afb6ff2103087aea10a01b613985610e007053a121"}, + {file = "propcache-0.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:02df07041e0820cacc8f739510078f2aadcfd3fc57eaeeb16d5ded85c872c89e"}, + {file = "propcache-0.3.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:f47d52fd9b2ac418c4890aad2f6d21a6b96183c98021f0a48497a904199f006e"}, + {file = "propcache-0.3.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:9ff4e9ecb6e4b363430edf2c6e50173a63e0820e549918adef70515f87ced19a"}, + {file = "propcache-0.3.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:ecc2920630283e0783c22e2ac94427f8cca29a04cfdf331467d4f661f4072dac"}, + {file = "propcache-0.3.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:c441c841e82c5ba7a85ad25986014be8d7849c3cfbdb6004541873505929a74e"}, + {file = "propcache-0.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6c929916cbdb540d3407c66f19f73387f43e7c12fa318a66f64ac99da601bcdf"}, + {file = "propcache-0.3.0-cp313-cp313t-win32.whl", hash = "sha256:0c3e893c4464ebd751b44ae76c12c5f5c1e4f6cbd6fbf67e3783cd93ad221863"}, + {file = "propcache-0.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:75e872573220d1ee2305b35c9813626e620768248425f58798413e9c39741f46"}, + {file = "propcache-0.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:03c091bb752349402f23ee43bb2bff6bd80ccab7c9df6b88ad4322258d6960fc"}, + {file = "propcache-0.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:46ed02532cb66612d42ae5c3929b5e98ae330ea0f3900bc66ec5f4862069519b"}, + {file = "propcache-0.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:11ae6a8a01b8a4dc79093b5d3ca2c8a4436f5ee251a9840d7790dccbd96cb649"}, + {file = "propcache-0.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df03cd88f95b1b99052b52b1bb92173229d7a674df0ab06d2b25765ee8404bce"}, + {file = 
"propcache-0.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:03acd9ff19021bd0567582ac88f821b66883e158274183b9e5586f678984f8fe"}, + {file = "propcache-0.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd54895e4ae7d32f1e3dd91261df46ee7483a735017dc6f987904f194aa5fd14"}, + {file = "propcache-0.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26a67e5c04e3119594d8cfae517f4b9330c395df07ea65eab16f3d559b7068fe"}, + {file = "propcache-0.3.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee25f1ac091def37c4b59d192bbe3a206298feeb89132a470325bf76ad122a1e"}, + {file = "propcache-0.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:58e6d2a5a7cb3e5f166fd58e71e9a4ff504be9dc61b88167e75f835da5764d07"}, + {file = "propcache-0.3.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:be90c94570840939fecedf99fa72839aed70b0ced449b415c85e01ae67422c90"}, + {file = "propcache-0.3.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:49ea05212a529c2caffe411e25a59308b07d6e10bf2505d77da72891f9a05641"}, + {file = "propcache-0.3.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:119e244ab40f70a98c91906d4c1f4c5f2e68bd0b14e7ab0a06922038fae8a20f"}, + {file = "propcache-0.3.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:507c5357a8d8b4593b97fb669c50598f4e6cccbbf77e22fa9598aba78292b4d7"}, + {file = "propcache-0.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8526b0941ec5a40220fc4dfde76aed58808e2b309c03e9fa8e2260083ef7157f"}, + {file = "propcache-0.3.0-cp39-cp39-win32.whl", hash = "sha256:7cedd25e5f678f7738da38037435b340694ab34d424938041aa630d8bac42663"}, + {file = "propcache-0.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:bf4298f366ca7e1ad1d21bbb58300a6985015909964077afd37559084590c929"}, + {file = "propcache-0.3.0-py3-none-any.whl", hash = "sha256:67dda3c7325691c2081510e92c561f465ba61b975f481735aefdfc845d2cd043"}, + {file = 
"propcache-0.3.0.tar.gz", hash = "sha256:a8fd93de4e1d278046345f49e2238cdb298589325849b2645d4a94c53faeffc5"}, +] + +[[package]] +name = "pycares" +version = "4.5.0" +description = "Python interface for c-ares" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "sys_platform == \"linux\" or sys_platform == \"darwin\"" +files = [ + {file = "pycares-4.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:13a82fad8239d6fbcf916099bee17d8b5666d0ddb77dace431e0f7961c9427ab"}, + {file = "pycares-4.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fefc7bebbe39b2e3b4b9615471233a8f7356b96129a7db9030313a3ae4ecc42d"}, + {file = "pycares-4.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e322e8ce810026f6e0c7c2a254b9ed02191ab8d42fa2ce6808ede1bdccab8e65"}, + {file = "pycares-4.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:723ba0803b016294430e40e544503fed9164949b694342c2552ab189e2b688ef"}, + {file = "pycares-4.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e48b20b59cdc929cc712a8b22e89c273256e482b49bb8999af98d2c6fc4563c2"}, + {file = "pycares-4.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de6e55bd9af595b112ac6080ac0a0d52b5853d0d8e6d01ac65ff09e51e62490a"}, + {file = "pycares-4.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a6f4b9063e3dd70460400367917698f209c10aabb68bf70b09e364895444487d"}, + {file = "pycares-4.5.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:95522d4840d702fd766439a7c7cd747935aa54cf0b8675e9fadd8414dd9dd0df"}, + {file = "pycares-4.5.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e4709ce4fd9dbee24b1397f71a2adb3267323bb5ad5e7fde3f87873d172dd156"}, + {file = "pycares-4.5.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8addbf3408af1010f50fd67ef634a6cb239ccb9c534c32a40713f3b8d306a98e"}, + {file = 
"pycares-4.5.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:d0428ef42fcf575e197047e6a47892404faa34231902a453b3dfed66af4178b3"}, + {file = "pycares-4.5.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:aed5c2732f3a6bdbbfab202267d37044ca1162f690b9d34b7ece97ba43f27453"}, + {file = "pycares-4.5.0-cp310-cp310-win32.whl", hash = "sha256:b1859ea770a7abec40a6d02b5ab03c2396c4900c01f4e50ddb6c0dca4c2a6a7c"}, + {file = "pycares-4.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:9f87d8da20a3a80ab05fe80c14a62bf078bd726ca6af609edbeb376fb97d50ab"}, + {file = "pycares-4.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5ca7a1dba7b88290710db45012e0903c21c839fa0a2b9ddc100bba8e66bfb251"}, + {file = "pycares-4.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:160e92588cdf1a0fa3a7015f47990b508d50efd9109ea4d719dee31c058f0648"}, + {file = "pycares-4.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f38e45d23660ed1dafdb956fd263ae4735530ef1578aa2bf2caabb94cee4523"}, + {file = "pycares-4.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f742acc6d29a99ffc14e3f154b3848ea05c5533b71065e0f0a0fd99c527491b2"}, + {file = "pycares-4.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ceaf71bcd7b6447705e689b8fee8836c20c6148511a90122981f524a84bfcca9"}, + {file = "pycares-4.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdc3c0be7b5b83e78e28818fecd0405bd401110dd6e2e66f7f10713c1188362c"}, + {file = "pycares-4.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd458ee69800195247aa19b5675c5914cbc091c5a220e4f0e96777a31bb555c1"}, + {file = "pycares-4.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a6649d713df73266708642fc3d04f110c0a66bee510fbce4cc5fed79df42083"}, + {file = "pycares-4.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = 
"sha256:ac57d7bda925c10b997434e7ce30a2c3689c2e96bab9fd0a1165d5577378eecd"}, + {file = "pycares-4.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ba17d8e5eeec4b2e0eb1a6a840bae9e62cd1c1c9cbc8dc9db9d1b9fdf33d0b54"}, + {file = "pycares-4.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9e9b7d1a8de703283e4735c0e532ba4bc600e88de872dcd1a9a4950cf74d9f4f"}, + {file = "pycares-4.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4c6922ecbe458c13a4a2c1177bbce38abc44b5f086bc82115a92eab34418915f"}, + {file = "pycares-4.5.0-cp311-cp311-win32.whl", hash = "sha256:1004b8a17614e33410b4b1bb68360977667f1cc9ab2dbcfb27240d6703e4cb6a"}, + {file = "pycares-4.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:2c9c1055c622258a0f315560b2880a372363484b87cbef48af092624804caa72"}, + {file = "pycares-4.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:506efbe5017807747ccd1bdcb3c2f6e64635bc01fee01a50c0b97d649018c162"}, + {file = "pycares-4.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c469ec9fbe0526f45a98f67c1ea55be03abf30809c4f9c9be4bc93fb6806304d"}, + {file = "pycares-4.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:597c0950ede240c3a779f023fcf2442207fc11e570d3ca4ccdbb0db5bbaf2588"}, + {file = "pycares-4.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9aa0da03c4df6ed0f87dd52a293bd0508734515041cc5be0f85d9edc1814914f"}, + {file = "pycares-4.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aea1ebf52767c777d10a1b3d03844b9b05cc892714b3ee177d5d9fbff74fb9fa"}, + {file = "pycares-4.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb20d84269ddffb177b6048e3bc03d0b9ffe17592093d900d5544805958d86b3"}, + {file = "pycares-4.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3125df81b657971ee5c0333f8f560ba0151db1eb7cf04aea7d783bb433b306c1"}, + {file = 
"pycares-4.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:525c77ea44546c12f379641aee163585d403cf50e29b04a06059d6aac894e956"}, + {file = "pycares-4.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:1fd87cb26b317a9988abfcfa4e4dbc55d5f20177e5979ad4d854468a9246c187"}, + {file = "pycares-4.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a90aecd41188884e57ae32507a2c6b010c60b791a253083761bbb37a488ecaed"}, + {file = "pycares-4.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0d3de65cab653979dcc491e03f596566c9d40346c9deb088e0f9fe70600d8737"}, + {file = "pycares-4.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:27a77b43604b3ba24e4fc49fd3ea59f50f7d89c7255f1f1ea46928b26cccacfa"}, + {file = "pycares-4.5.0-cp312-cp312-win32.whl", hash = "sha256:6028cb8766f0fea1d2caa69fac23621fbe2cff9ce6968374e165737258703a33"}, + {file = "pycares-4.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:2ce10672c4cfd1c5fb6718e8b25f0336ca11c89aab88aa6df53dafc4e41df740"}, + {file = "pycares-4.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:011cd670da7caf55664c944abb71ec39af82b837f8d48da7cf0eec80f5682c4c"}, + {file = "pycares-4.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b5c67930497fb2b1dbcaa85f8c4188fc2cb62e41d787deeed2d33cfe9dd6bf52"}, + {file = "pycares-4.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d435a3b8468c656a7e7180dd7c4794510f6c612c33ad61a0fff6e440621f8b5"}, + {file = "pycares-4.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8371f5ee1efb33d6276e275d152c9c5605e5f2e58a9e168519ec1f9e13dd95ae"}, + {file = "pycares-4.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c76a9096fd5dc49c61c5235ea7032e8b43f4382800d64ca1e0e0cda700c082aa"}, + {file = "pycares-4.5.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b604af76b57469ff68b44e9e4c857eaee43bc5035f4f183f07f4f7149191fe1b"}, + {file = 
"pycares-4.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c589bd4f9160bfdb2f8080cf564bb120a4312cf091db07fe417f8e58a896a63c"}, + {file = "pycares-4.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:361262805bb09742c364ec0117842043c950339e38561009bcabbb6ac89458ef"}, + {file = "pycares-4.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6d2afb3c0776467055bf33db843ef483d25639be0f32e3a13ef5d4dc64098bf5"}, + {file = "pycares-4.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:bc7a1d8ed7c7a4de17706a3c89b305b02eb64c778897e6727c043e5b9dd0d853"}, + {file = "pycares-4.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:5703ec878b5c1efacdbf24ceaedfa606112fc67af5564f4db99c2c210f3ffadc"}, + {file = "pycares-4.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d87758e09dbf52c27ed7cf7bc7eaf8b3226217d10c52b03d61a14d59f40fcae1"}, + {file = "pycares-4.5.0-cp313-cp313-win32.whl", hash = "sha256:3316d490b4ce1a69f034881ac1ea7608f5f24ea5293db24ab574ac70b7d7e407"}, + {file = "pycares-4.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:018e700fb0d1a2db5ec96e404ffa85ed97cc96e96d6af0bb9548111e37cf36a3"}, + {file = "pycares-4.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:78c9890d93108c70708babee8a783e6021233f1f0a763d3634add6fd429aae58"}, + {file = "pycares-4.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba69f8123995aa3df99f6ebc726fc6a4b08e467a957b215c0a82749b901d5eed"}, + {file = "pycares-4.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32d33c4ffae31d1b544adebe0b9aee2be1fb18aedd3f4f91e41c495ccbafd6d8"}, + {file = "pycares-4.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:17a060cfc469828abf7f5945964d505bd8c0a756942fee159538f7885169752e"}, + {file = "pycares-4.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1d0d5e69fa29e41b590a9dd5842454e8f34e2b928c92540aaf87e0161de8120"}, + {file = 
"pycares-4.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f096699c46f5dde2c7a8d91501a36d2d58500f4d63682e2ec14a0fed7cca6402"}, + {file = "pycares-4.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:429fe2065581a64a5f024f507b5f679bf37ea0ed39c3ba6289dba907e1c8a8f4"}, + {file = "pycares-4.5.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9ea2f6d48e64b413b97b41b47392087b452af9bf9f9d4d6d05305a159f45909f"}, + {file = "pycares-4.5.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:96d3aecd747a3fcd1e12c1ea1481b0813b4e0e80d40f314db7a86dda5bb1bd94"}, + {file = "pycares-4.5.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:32919f6eda7f5ea4df3e64149fc5792b0d455277d23d6d0fc365142062f35d80"}, + {file = "pycares-4.5.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:37add862461f9a3fc7ee4dd8b68465812b39456e21cebd5a33c414131ac05060"}, + {file = "pycares-4.5.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ed1d050d2c6d74a77c1b6c51fd99426cc000b4202a50d28d6ca75f7433099a6b"}, + {file = "pycares-4.5.0-cp39-cp39-win32.whl", hash = "sha256:887ac451ffe6e39ee46d3d0989c7bb829933d77e1dad5776511d825fc7e6a25b"}, + {file = "pycares-4.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:5c8b87c05740595bc8051dc98e51f022f003750e7da90f62f7a9fd50e330b196"}, + {file = "pycares-4.5.0.tar.gz", hash = "sha256:025b6c2ffea4e9fb8f9a097381c2fecb24aff23fbd6906e70da22ec9ba60e19d"}, +] + +[package.dependencies] +cffi = ">=1.5.0" + +[package.extras] +idna = ["idna (>=2.1)"] + +[[package]] +name = "pycparser" +version = "2.22" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "sys_platform == \"linux\" or sys_platform == \"darwin\" or platform_python_implementation != \"CPython\"" +files = [ + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", 
hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, +] + +[[package]] +name = "pytest" +version = "8.3.5" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820"}, + {file = "pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.25.3" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pytest_asyncio-0.25.3-py3-none-any.whl", hash = "sha256:9e89518e0f9bd08928f97a3482fdc4e244df17529460bc038291ccaf8f85c7c3"}, + {file = "pytest_asyncio-0.25.3.tar.gz", hash = "sha256:fc1da2cf9f125ada7e710b4ddad05518d4cee187ae9412e9ac9271003497f07a"}, +] + +[package.dependencies] +pytest = ">=8.2,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + +[[package]] +name = "pytest-cov" +version = "6.0.0" +description = "Pytest plugin for measuring coverage." 
+optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pytest-cov-6.0.0.tar.gz", hash = "sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0"}, + {file = "pytest_cov-6.0.0-py3-none-any.whl", hash = "sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35"}, +] + +[package.dependencies] +coverage = {version = ">=7.5", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] + +[[package]] +name = "pytest-mock" +version = "3.14.0" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"}, + {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"}, +] + +[package.dependencies] +pytest = ">=6.2.5" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = true +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +markers = "extra == \"all\" or extra == \"downloads\" or extra == \"examples\" or extra == \"metrics\" or extra == \"predictions\" or extra == \"snapshots\"" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pytz" +version = "2025.1" +description = "World timezone definitions, modern and historical" +optional = true +python-versions = "*" +groups = ["main"] +markers = 
"extra == \"all\" or extra == \"downloads\" or extra == \"examples\" or extra == \"metrics\" or extra == \"predictions\" or extra == \"snapshots\"" +files = [ + {file = "pytz-2025.1-py2.py3-none-any.whl", hash = "sha256:89dd22dca55b46eac6eda23b2d72721bf1bdfef212645d81513ef5d03038de57"}, + {file = "pytz-2025.1.tar.gz", hash = "sha256:c2db42be2a2518b28e65f9207c4d05e6ff547d1efa4086469ef855e4ab70178e"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash 
= "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = 
"PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = 
"PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "requests-mock" +version = "1.12.1" +description = "Mock out responses from the requests package" +optional = false +python-versions = ">=3.5" +groups = ["dev"] +files = [ + {file = "requests-mock-1.12.1.tar.gz", hash = "sha256:e9e12e333b525156e82a3c852f22016b9158220d2f47454de9cae8a77d371401"}, + {file = "requests_mock-1.12.1-py2.py3-none-any.whl", hash = "sha256:b1e37054004cdd5e56c84454cc7df12b25f90f382159087f4b6915aaeef39563"}, +] + +[package.dependencies] +requests = ">=2.22,<3" + +[package.extras] +fixture = ["fixtures"] + +[[package]] +name = "ruff" +version = "0.9.9" +description = "An extremely fast Python linter and code formatter, written in Rust." 
+optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "ruff-0.9.9-py3-none-linux_armv6l.whl", hash = "sha256:628abb5ea10345e53dff55b167595a159d3e174d6720bf19761f5e467e68d367"}, + {file = "ruff-0.9.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b6cd1428e834b35d7493354723543b28cc11dc14d1ce19b685f6e68e07c05ec7"}, + {file = "ruff-0.9.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5ee162652869120ad260670706f3cd36cd3f32b0c651f02b6da142652c54941d"}, + {file = "ruff-0.9.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3aa0f6b75082c9be1ec5a1db78c6d4b02e2375c3068438241dc19c7c306cc61a"}, + {file = "ruff-0.9.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:584cc66e89fb5f80f84b05133dd677a17cdd86901d6479712c96597a3f28e7fe"}, + {file = "ruff-0.9.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abf3369325761a35aba75cd5c55ba1b5eb17d772f12ab168fbfac54be85cf18c"}, + {file = "ruff-0.9.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:3403a53a32a90ce929aa2f758542aca9234befa133e29f4933dcef28a24317be"}, + {file = "ruff-0.9.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:18454e7fa4e4d72cffe28a37cf6a73cb2594f81ec9f4eca31a0aaa9ccdfb1590"}, + {file = "ruff-0.9.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fadfe2c88724c9617339f62319ed40dcdadadf2888d5afb88bf3adee7b35bfb"}, + {file = "ruff-0.9.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6df104d08c442a1aabcfd254279b8cc1e2cbf41a605aa3e26610ba1ec4acf0b0"}, + {file = "ruff-0.9.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d7c62939daf5b2a15af48abbd23bea1efdd38c312d6e7c4cedf5a24e03207e17"}, + {file = "ruff-0.9.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:9494ba82a37a4b81b6a798076e4a3251c13243fc37967e998efe4cce58c8a8d1"}, + {file = "ruff-0.9.9-py3-none-musllinux_1_2_i686.whl", hash = 
"sha256:4efd7a96ed6d36ef011ae798bf794c5501a514be369296c672dab7921087fa57"}, + {file = "ruff-0.9.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ab90a7944c5a1296f3ecb08d1cbf8c2da34c7e68114b1271a431a3ad30cb660e"}, + {file = "ruff-0.9.9-py3-none-win32.whl", hash = "sha256:6b4c376d929c25ecd6d87e182a230fa4377b8e5125a4ff52d506ee8c087153c1"}, + {file = "ruff-0.9.9-py3-none-win_amd64.whl", hash = "sha256:837982ea24091d4c1700ddb2f63b7070e5baec508e43b01de013dc7eff974ff1"}, + {file = "ruff-0.9.9-py3-none-win_arm64.whl", hash = "sha256:3ac78f127517209fe6d96ab00f3ba97cafe38718b23b1db3e96d8b2d39e37ddf"}, + {file = "ruff-0.9.9.tar.gz", hash = "sha256:0062ed13f22173e85f8f7056f9a24016e692efeea8704d1a5e8011b8aa850933"}, +] + +[[package]] +name = "setuptools" +version = "75.8.2" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "setuptools-75.8.2-py3-none-any.whl", hash = "sha256:558e47c15f1811c1fa7adbd0096669bf76c1d3f433f58324df69f3f5ecac4e8f"}, + {file = "setuptools-75.8.2.tar.gz", hash = "sha256:4880473a969e5f23f2a2be3646b2dfd84af9028716d398e46192f84bc36900d2"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""] +core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.collections", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", 
"towncrier (<24.7)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"] + +[[package]] +name = "six" +version = "1.17.0" +description = "Python 2 and 3 compatibility utilities" +optional = true +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +markers = "extra == \"all\" or extra == \"downloads\" or extra == \"examples\" or extra == \"metrics\" or extra == \"predictions\" or extra == \"snapshots\"" +files = [ + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, +] + +[[package]] +name = "tenacity" +version = "9.0.0" +description = "Retry code until it succeeds" +optional = true +python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"all\" or extra == \"metrics\"" +files = [ + {file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"}, + {file = "tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"}, +] + +[package.extras] +doc = ["reno", "sphinx"] +test = ["pytest", "tornado (>=4.5)", "typeguard"] + +[[package]] +name = "tomli" +version = "2.2.1" +description = "A lil' 
TOML parser" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_full_version <= \"3.11.0a6\"" +files = [ + {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, + {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, + {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, + {file = 
"tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, + {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, + {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, + {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, + {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, + {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, + {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, +] + +[[package]] +name = "tqdm" +version = "4.67.1" +description = "Fast, Extensible Progress Meter" +optional = true +python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"all\" or extra == \"downloads\"" +files = [ + {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, + {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["nbval", "pytest (>=6)", "pytest-asyncio (>=0.24)", "pytest-cov", "pytest-timeout"] +discord = ["requests"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +groups = 
["main", "dev"] +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] +markers = {main = "python_version < \"3.11\""} + +[[package]] +name = "typish" +version = "1.9.3" +description = "Functionality for types" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "typish-1.9.3-py3-none-any.whl", hash = "sha256:03cfee5e6eb856dbf90244e18f4e4c41044c8790d5779f4e775f63f982e2f896"}, +] + +[package.extras] +test = ["codecov", "coverage", "mypy", "nptyping (>=1.3.0)", "numpy", "pycodestyle", "pylint", "pytest"] + +[[package]] +name = "tzdata" +version = "2025.1" +description = "Provider of IANA time zone data" +optional = true +python-versions = ">=2" +groups = ["main"] +markers = "extra == \"all\" or extra == \"downloads\" or extra == \"examples\" or extra == \"metrics\" or extra == \"predictions\" or extra == \"snapshots\"" +files = [ + {file = "tzdata-2025.1-py2.py3-none-any.whl", hash = "sha256:7e127113816800496f027041c570f50bcd464a020098a3b6b199517772303639"}, + {file = "tzdata-2025.1.tar.gz", hash = "sha256:24894909e88cdb28bd1636c6887801df64cb485bd593f2fd83ef29075a81d694"}, +] + +[[package]] +name = "urllib3" +version = "2.3.0" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"}, + {file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "yarl" +version = "1.18.3" +description = "Yet another URL library" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "yarl-1.18.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7df647e8edd71f000a5208fe6ff8c382a1de8edfbccdbbfe649d263de07d8c34"}, + {file = "yarl-1.18.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c69697d3adff5aa4f874b19c0e4ed65180ceed6318ec856ebc423aa5850d84f7"}, + {file = "yarl-1.18.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:602d98f2c2d929f8e697ed274fbadc09902c4025c5a9963bf4e9edfc3ab6f7ed"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c654d5207c78e0bd6d749f6dae1dcbbfde3403ad3a4b11f3c5544d9906969dde"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5094d9206c64181d0f6e76ebd8fb2f8fe274950a63890ee9e0ebfd58bf9d787b"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35098b24e0327fc4ebdc8ffe336cee0a87a700c24ffed13161af80124b7dc8e5"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3236da9272872443f81fedc389bace88408f64f89f75d1bdb2256069a8730ccc"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e2c08cc9b16f4f4bc522771d96734c7901e7ebef70c6c5c35dd0f10845270bcd"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80316a8bd5109320d38eef8833ccf5f89608c9107d02d2a7f985f98ed6876990"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c1e1cc06da1491e6734f0ea1e6294ce00792193c463350626571c287c9a704db"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fea09ca13323376a2fdfb353a5fa2e59f90cd18d7ca4eaa1fd31f0a8b4f91e62"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e3b9fd71836999aad54084906f8663dffcd2a7fb5cdafd6c37713b2e72be1760"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:757e81cae69244257d125ff31663249b3013b5dc0a8520d73694aed497fb195b"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b1771de9944d875f1b98a745bc547e684b863abf8f8287da8466cf470ef52690"}, + {file = "yarl-1.18.3-cp310-cp310-win32.whl", hash = "sha256:8874027a53e3aea659a6d62751800cf6e63314c160fd607489ba5c2edd753cf6"}, + {file = "yarl-1.18.3-cp310-cp310-win_amd64.whl", hash = "sha256:93b2e109287f93db79210f86deb6b9bbb81ac32fc97236b16f7433db7fc437d8"}, + {file = "yarl-1.18.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8503ad47387b8ebd39cbbbdf0bf113e17330ffd339ba1144074da24c545f0069"}, + {file = "yarl-1.18.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:02ddb6756f8f4517a2d5e99d8b2f272488e18dd0bfbc802f31c16c6c20f22193"}, + {file = "yarl-1.18.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:67a283dd2882ac98cc6318384f565bffc751ab564605959df4752d42483ad889"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d980e0325b6eddc81331d3f4551e2a333999fb176fd153e075c6d1c2530aa8a8"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b643562c12680b01e17239be267bc306bbc6aac1f34f6444d1bded0c5ce438ca"}, + {file = 
"yarl-1.18.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c017a3b6df3a1bd45b9fa49a0f54005e53fbcad16633870104b66fa1a30a29d8"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75674776d96d7b851b6498f17824ba17849d790a44d282929c42dbb77d4f17ae"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ccaa3a4b521b780a7e771cc336a2dba389a0861592bbce09a476190bb0c8b4b3"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2d06d3005e668744e11ed80812e61efd77d70bb7f03e33c1598c301eea20efbb"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:9d41beda9dc97ca9ab0b9888cb71f7539124bc05df02c0cff6e5acc5a19dcc6e"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ba23302c0c61a9999784e73809427c9dbedd79f66a13d84ad1b1943802eaaf59"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6748dbf9bfa5ba1afcc7556b71cda0d7ce5f24768043a02a58846e4a443d808d"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0b0cad37311123211dc91eadcb322ef4d4a66008d3e1bdc404808992260e1a0e"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0fb2171a4486bb075316ee754c6d8382ea6eb8b399d4ec62fde2b591f879778a"}, + {file = "yarl-1.18.3-cp311-cp311-win32.whl", hash = "sha256:61b1a825a13bef4a5f10b1885245377d3cd0bf87cba068e1d9a88c2ae36880e1"}, + {file = "yarl-1.18.3-cp311-cp311-win_amd64.whl", hash = "sha256:b9d60031cf568c627d028239693fd718025719c02c9f55df0a53e587aab951b5"}, + {file = "yarl-1.18.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1dd4bdd05407ced96fed3d7f25dbbf88d2ffb045a0db60dbc247f5b3c5c25d50"}, + {file = "yarl-1.18.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7c33dd1931a95e5d9a772d0ac5e44cac8957eaf58e3c8da8c1414de7dd27c576"}, + {file = 
"yarl-1.18.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25b411eddcfd56a2f0cd6a384e9f4f7aa3efee14b188de13048c25b5e91f1640"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:436c4fc0a4d66b2badc6c5fc5ef4e47bb10e4fd9bf0c79524ac719a01f3607c2"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e35ef8683211db69ffe129a25d5634319a677570ab6b2eba4afa860f54eeaf75"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84b2deecba4a3f1a398df819151eb72d29bfeb3b69abb145a00ddc8d30094512"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00e5a1fea0fd4f5bfa7440a47eff01d9822a65b4488f7cff83155a0f31a2ecba"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0e883008013c0e4aef84dcfe2a0b172c4d23c2669412cf5b3371003941f72bb"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5a3f356548e34a70b0172d8890006c37be92995f62d95a07b4a42e90fba54272"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ccd17349166b1bee6e529b4add61727d3f55edb7babbe4069b5764c9587a8cc6"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b958ddd075ddba5b09bb0be8a6d9906d2ce933aee81100db289badbeb966f54e"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c7d79f7d9aabd6011004e33b22bc13056a3e3fb54794d138af57f5ee9d9032cb"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4891ed92157e5430874dad17b15eb1fda57627710756c27422200c52d8a4e393"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ce1af883b94304f493698b00d0f006d56aea98aeb49d75ec7d98cd4a777e9285"}, + {file = "yarl-1.18.3-cp312-cp312-win32.whl", hash = "sha256:f91c4803173928a25e1a55b943c81f55b8872f0018be83e3ad4938adffb77dd2"}, 
+ {file = "yarl-1.18.3-cp312-cp312-win_amd64.whl", hash = "sha256:7e2ee16578af3b52ac2f334c3b1f92262f47e02cc6193c598502bd46f5cd1477"}, + {file = "yarl-1.18.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:90adb47ad432332d4f0bc28f83a5963f426ce9a1a8809f5e584e704b82685dcb"}, + {file = "yarl-1.18.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:913829534200eb0f789d45349e55203a091f45c37a2674678744ae52fae23efa"}, + {file = "yarl-1.18.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ef9f7768395923c3039055c14334ba4d926f3baf7b776c923c93d80195624782"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a19f62ff30117e706ebc9090b8ecc79aeb77d0b1f5ec10d2d27a12bc9f66d0"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e17c9361d46a4d5addf777c6dd5eab0715a7684c2f11b88c67ac37edfba6c482"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a74a13a4c857a84a845505fd2d68e54826a2cd01935a96efb1e9d86c728e186"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41f7ce59d6ee7741af71d82020346af364949314ed3d87553763a2df1829cc58"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f52a265001d830bc425f82ca9eabda94a64a4d753b07d623a9f2863fde532b53"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:82123d0c954dc58db301f5021a01854a85bf1f3bb7d12ae0c01afc414a882ca2"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:2ec9bbba33b2d00999af4631a3397d1fd78290c48e2a3e52d8dd72db3a067ac8"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:fbd6748e8ab9b41171bb95c6142faf068f5ef1511935a0aa07025438dd9a9bc1"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = 
"sha256:877d209b6aebeb5b16c42cbb377f5f94d9e556626b1bfff66d7b0d115be88d0a"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b464c4ab4bfcb41e3bfd3f1c26600d038376c2de3297760dfe064d2cb7ea8e10"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8d39d351e7faf01483cc7ff7c0213c412e38e5a340238826be7e0e4da450fdc8"}, + {file = "yarl-1.18.3-cp313-cp313-win32.whl", hash = "sha256:61ee62ead9b68b9123ec24bc866cbef297dd266175d53296e2db5e7f797f902d"}, + {file = "yarl-1.18.3-cp313-cp313-win_amd64.whl", hash = "sha256:578e281c393af575879990861823ef19d66e2b1d0098414855dd367e234f5b3c"}, + {file = "yarl-1.18.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:61e5e68cb65ac8f547f6b5ef933f510134a6bf31bb178be428994b0cb46c2a04"}, + {file = "yarl-1.18.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe57328fbc1bfd0bd0514470ac692630f3901c0ee39052ae47acd1d90a436719"}, + {file = "yarl-1.18.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a440a2a624683108a1b454705ecd7afc1c3438a08e890a1513d468671d90a04e"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09c7907c8548bcd6ab860e5f513e727c53b4a714f459b084f6580b49fa1b9cee"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b4f6450109834af88cb4cc5ecddfc5380ebb9c228695afc11915a0bf82116789"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9ca04806f3be0ac6d558fffc2fdf8fcef767e0489d2684a21912cc4ed0cd1b8"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77a6e85b90a7641d2e07184df5557132a337f136250caafc9ccaa4a2a998ca2c"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6333c5a377c8e2f5fae35e7b8f145c617b02c939d04110c76f29ee3676b5f9a5"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:0b3c92fa08759dbf12b3a59579a4096ba9af8dd344d9a813fc7f5070d86bbab1"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4ac515b860c36becb81bb84b667466885096b5fc85596948548b667da3bf9f24"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:045b8482ce9483ada4f3f23b3774f4e1bf4f23a2d5c912ed5170f68efb053318"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:a4bb030cf46a434ec0225bddbebd4b89e6471814ca851abb8696170adb163985"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:54d6921f07555713b9300bee9c50fb46e57e2e639027089b1d795ecd9f7fa910"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1d407181cfa6e70077df3377938c08012d18893f9f20e92f7d2f314a437c30b1"}, + {file = "yarl-1.18.3-cp39-cp39-win32.whl", hash = "sha256:ac36703a585e0929b032fbaab0707b75dc12703766d0b53486eabd5139ebadd5"}, + {file = "yarl-1.18.3-cp39-cp39-win_amd64.whl", hash = "sha256:ba87babd629f8af77f557b61e49e7c7cac36f22f871156b91e10a6e9d4f829e9"}, + {file = "yarl-1.18.3-py3-none-any.whl", hash = "sha256:b57f4f58099328dfb26c6a771d09fb20dbbae81d20cfb66141251ea063bd101b"}, + {file = "yarl-1.18.3.tar.gz", hash = "sha256:ac1801c45cbf77b6c99242eeff4fffb5e4e73a800b5c4ad4fc0be5def634d2e1"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" +propcache = ">=0.2.0" + +[extras] +all = ["pandas", "plotly", "tqdm"] +downloads = ["pandas", "tqdm"] +examples = ["pandas"] +metrics = ["pandas", "plotly"] +predictions = ["pandas"] +snapshots = ["pandas"] + +[metadata] +lock-version = "2.1" +python-versions = ">=3.10" +content-hash = "07acdc14a2bdadb456e7156e03d04fda1bacd330bb548abe40bd08d96efc1a2f" diff --git a/pyproject.toml b/pyproject.toml index cbb7c570..10c068fb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,32 +1,76 @@ [build-system] -requires = [ - "flit_core >=2,<4", - ] -requires-python = ">=3.6" -build-backend = "flit_core.buildapi" - -[tool.flit.metadata] -module 
= "indico_toolkit" -author = "indico" -author-email = "scott.levin@indico.io" -home-page = "https://github.com/IndicoDataSolutions/Indico-Solutions-Toolkit" -classifiers = [ "License :: OSI Approved :: MIT License",] -description-file = "README.md" -requires = [ - "indico-client>=6.1.0", - "plotly==5.2.1", - "tqdm==4.50.0", - "faker==13.3.3", - "python-dateutil==2.8.1" -] +requires = ["poetry-core>=2.0.0,<3.0.0"] +build-backend = "poetry.core.masonry.api" -[tool.flit.metadata.requires-extra] -test = [ - "pytest==8.3.4", - "pytest-asyncio==0.25.2", - "pytest-dependency==0.6.0", - "requests-mock>=1.7.0-7" -] -full = [ - "spacy>=3.1.4,<4" +[project] +name = "indico-toolkit" +description = "Classes, functions, and abstractions for Indico IPA" +license = "MIT" +authors = [ + { name = "Indico Solutions Engineering", email = "solutionsengineering@indicodata.ai" }, ] +readme = "README.md" +urls = { source = "https://github.com/IndicoDataSolutions/Indico-Solutions-Toolkit" } +requires-python = ">=3.10" +version = "6.14.0" +dependencies = ["indico-client (>=6.14.0,<7.0.0)"] + +[project.optional-dependencies] +all = ["pandas (>=2.2.3,<3.0.0)", "plotly (>=5.2.1,<6.0.0)", "tqdm (>=4.50.0,<5.0.0)"] +downloads = ["pandas (>=2.2.3,<3.0.0)", "tqdm (>=4.50.0,<5.0.0)"] +examples = ["pandas (>=2.2.3,<3.0.0)"] +metrics = ["pandas (>=2.2.3,<3.0.0)", "plotly (>=5.2.1,<6.0.0)"] +predictions = ["pandas (>=2.2.3,<3.0.0)"] +snapshots = ["pandas (>=2.2.3,<3.0.0)"] + +[tool.poetry.group.dev.dependencies] +black = "^25.1.0" +coverage = "^7.6.12" +mypy = "^1.15.0" +poethepoet = "^0.32.2" +pytest = "^8.3.4" +pytest-asyncio = "^0.25.3" +pytest-cov = "^6.0.0" +pytest-mock = "^3.14.0" +requests-mock = "^1.12.1" +ruff = "^0.9.6" + +[tool.poe.tasks] +black = "black indico_toolkit examples tests" +black-check = "black --check indico_toolkit examples tests" +coverage = "coverage html" +mypy = "mypy indico_toolkit tests" +pytest = "pytest tests --cov=indico_toolkit" +pytest-unit = "pytest tests --ignore 
tests/integration --cov=indico_toolkit" +ruff = "ruff check --fix-only --exit-zero indico_toolkit examples tests" +ruff-check = "ruff check indico_toolkit examples tests" +test-integration = "pytest tests/integration --cov=indico_toolkit" + +format = ["ruff", "black"] +check = ["ruff-check", "black-check", "mypy"] +test = ["pytest-unit", "coverage"] +all = ["format", "check", "test"] + +[tool.black] +line-length = 88 + +[tool.ruff] +line-length = 88 +lint.select = ["E", "F", "I"] +lint.fixable = ["I"] + +[tool.mypy] +strict = true +show_error_codes = true +warn_unreachable = true +disallow_any_unimported = true + +[tool.pytest.ini_options] +asyncio_mode = "auto" +asyncio_default_fixture_loop_scope = "function" + +[tool.coverage.run] +branch = true + +[tool.coverage.report] +exclude_lines = ["pragma: no cover", "@abstractmethod"] diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index f2cb2f01..00000000 --- a/requirements.txt +++ /dev/null @@ -1,12 +0,0 @@ -indico-client>=6.1.0 -python-dateutil==2.8.1 -pytz==2021.1 -pytest==8.3.4 -pytest-asyncio==0.25.2 -pytest-dependency==0.6.0 -pytest-mock==3.11.1 -coverage==5.5 -black==22.3 -plotly==5.2.1 -tqdm==4.50.0 -faker==13.3.3 diff --git a/setup.py b/setup.py deleted file mode 100644 index c72d613f..00000000 --- a/setup.py +++ /dev/null @@ -1,24 +0,0 @@ -from setuptools import find_packages, setup - -setup( - name="indico-toolkit", - version="6.1.0", - packages=find_packages(exclude=["tests"]), - description="""Tools to assist with Indico IPA development""", - license="MIT License (See LICENSE)", - author="indico", - author_email="engineering@indico.io", - tests_require=[ - "pytest>=5.2.1", - "requests-mock>=1.7.0-7", - "pytest-dependency==0.5.1", - ], - install_requires=[ - "indico-client>=5.1.4", - "plotly==5.2.1", - "tqdm==4.50.0", - "faker==13.3.3", - "python-dateutil==2.8.1" - ], - extras_require={"full": ["PyMuPDF==1.19.6", "spacy>=3.1.4,<4"]}, -) diff --git a/tests/association/conftest.py 
b/tests/association/conftest.py index 95b86bc1..a32b058f 100644 --- a/tests/association/conftest.py +++ b/tests/association/conftest.py @@ -1,7 +1,7 @@ -import os import json -import pytest +import os +import pytest FILE_PATH = os.path.dirname(os.path.abspath(__file__)) diff --git a/tests/association/test_extracted_tokens.py b/tests/association/test_extracted_tokens.py index c2683bae..fe6fd738 100644 --- a/tests/association/test_extracted_tokens.py +++ b/tests/association/test_extracted_tokens.py @@ -1,6 +1,5 @@ -import json import pytest -from collections import defaultdict + from indico_toolkit.association import ExtractedTokens diff --git a/tests/association/test_line_items.py b/tests/association/test_line_items.py index c1f9cab4..956fd361 100644 --- a/tests/association/test_line_items.py +++ b/tests/association/test_line_items.py @@ -1,5 +1,5 @@ import pytest -from collections import defaultdict + from indico_toolkit.association import LineItems @@ -82,13 +82,13 @@ def test_prediction_reordering(three_row_invoice_preds, three_row_invoice_tokens def test_empty_line_items_init(three_row_invoice_preds, three_row_invoice_tokens): with pytest.raises(TypeError): - LineItems(three_row_invoice_preds,) + LineItems(three_row_invoice_preds) def test_mapped_positions_by_page(three_row_invoice_preds, three_row_invoice_tokens): litems = LineItems( three_row_invoice_preds, - ("work_order_number", "line_date", "work_order_tonnage") + ("work_order_number", "line_date", "work_order_tonnage"), ) litems.get_bounding_boxes(three_row_invoice_tokens) assert isinstance(litems.mapped_positions_by_page, dict) @@ -102,8 +102,8 @@ def test_predictions_sorted_by_bbtop( ): litems = LineItems( two_row_bank_statement_preds, - ["Deposit Date", "Withdrawal Amount", "Deposit Amount", "Withdrawal Date"] - ) + ["Deposit Date", "Withdrawal Amount", "Deposit Amount", "Withdrawal Date"], + ) litems.get_bounding_boxes(two_row_bank_statement_tokens, raise_for_no_match=False) litems.assign_row_number() 
for row in litems.grouped_line_items: diff --git a/tests/association/test_positioning.py b/tests/association/test_positioning.py index 8bf4853c..7c398c35 100644 --- a/tests/association/test_positioning.py +++ b/tests/association/test_positioning.py @@ -1,7 +1,9 @@ -import pytest import json import os -from indico_toolkit.association import Positioning, positioning + +import pytest + +from indico_toolkit.association import Positioning from indico_toolkit.errors import ToolkitInputError @@ -22,16 +24,17 @@ def generate_mapped_pred( "page_num": page_num, } + FILE_PATH = os.path.dirname(os.path.abspath(__file__)) + @pytest.fixture(scope="function") def bbox_token_page(): - with open(os.path.join(FILE_PATH, "data/token_page/tokens.json"), - "r", - ) as f: + with open(os.path.join(FILE_PATH, "data/token_page/tokens.json"), "r") as f: tokens = json.load(f) return tokens + @pytest.mark.parametrize( "input, expected", # first pred is "above", second is "below" @@ -102,37 +105,52 @@ def test_positioned_above_same_page_false(input, expected): is_above = positioning.positioned_above(input[0], input[1], must_be_same_page=False) assert is_above == expected + @pytest.mark.parametrize( "input, expected", [ ((generate_mapped_pred(), generate_mapped_pred(11, 20, 10, 20)), False), ((generate_mapped_pred(), generate_mapped_pred(-5, 5, 1, 9)), False), ((generate_mapped_pred(), generate_mapped_pred(11, 20, 6, 15)), False), - ((generate_mapped_pred(0, 10, 10, 20), generate_mapped_pred(11, 20, 4, 15)), False), + ( + (generate_mapped_pred(0, 10, 10, 20), generate_mapped_pred(11, 20, 4, 15)), + False, + ), ((generate_mapped_pred(), generate_mapped_pred(11, 20, 1, 9)), True), ((generate_mapped_pred(), generate_mapped_pred(11, 20, 4, 15)), True), - ((generate_mapped_pred(0, 10, 10, 20), generate_mapped_pred(11, 20, 5, 15)), True), - ] + ( + (generate_mapped_pred(0, 10, 10, 20), generate_mapped_pred(11, 20, 5, 15)), + True, + ), + ], ) def test_positioned_above_overlap_same_page_true(input, 
expected): position = Positioning() - output = position.positioned_above_overlap(input[0], input[1], min_overlap_percent=.5) + output = position.positioned_above_overlap( + input[0], input[1], min_overlap_percent=0.5 + ) assert output == expected + def test_positioned_above_overlap_same_page_false(): position = Positioning() with pytest.raises(ToolkitInputError): position.positioned_above_overlap( generate_mapped_pred(page_num=1), generate_mapped_pred(), - min_overlap_percent=.5 + min_overlap_percent=0.5, ) + + @pytest.mark.parametrize( "input, expected", [ ((generate_mapped_pred(), generate_mapped_pred(-5, 5, 1, 9)), False), - ((generate_mapped_pred(0, 10, 10, 20), generate_mapped_pred(11, 20, 4, 15)), True), - ] + ( + (generate_mapped_pred(0, 10, 10, 20), generate_mapped_pred(11, 20, 4, 15)), + True, + ), + ], ) def test_positioned_above_overlap_min_overlap_percent_none(input, expected): position = Positioning() @@ -177,10 +195,10 @@ def test_positioned_on_same_level(input, expected): def test_positioned_on_same_level_must_be_same_page(): position = Positioning() - output = position.positioned_on_same_level( + assert not position.positioned_on_same_level( generate_mapped_pred(), generate_mapped_pred(page_num=1) ) - assert output == False + @pytest.mark.parametrize( "input, expected", @@ -229,8 +247,14 @@ def test_get_horizontal_overlap_different_pages(): @pytest.mark.parametrize( "input, expected", [ - ((generate_mapped_pred(0, 10, 0, 10), generate_mapped_pred(10, 20, 20, 40)), 0.0), - ((generate_mapped_pred(0, 10, 10, 20), generate_mapped_pred(10, 20, 0, 10)), 0.0), + ( + (generate_mapped_pred(0, 10, 0, 10), generate_mapped_pred(10, 20, 20, 40)), + 0.0, + ), + ( + (generate_mapped_pred(0, 10, 10, 20), generate_mapped_pred(10, 20, 0, 10)), + 0.0, + ), ((generate_mapped_pred(0, 10, 0, 10), generate_mapped_pred(10, 20, 0, 5)), 1.0), ((generate_mapped_pred(0, 10, 0, 5), generate_mapped_pred(10, 20, 0, 10)), 0.5), ], @@ -253,8 +277,14 @@ def 
test_get_vertical_overlap_different_pages(): @pytest.mark.parametrize( "input, expected", [ - ((generate_mapped_pred(0, 10, 0, 10), generate_mapped_pred(20, 40, 10, 20)), 0.0), - ((generate_mapped_pred(10, 20, 0, 10), generate_mapped_pred(0, 10, 10, 20)), 0.0), + ( + (generate_mapped_pred(0, 10, 0, 10), generate_mapped_pred(20, 40, 10, 20)), + 0.0, + ), + ( + (generate_mapped_pred(10, 20, 0, 10), generate_mapped_pred(0, 10, 10, 20)), + 0.0, + ), ((generate_mapped_pred(0, 10, 0, 10), generate_mapped_pred(0, 5, 10, 20)), 1.0), ((generate_mapped_pred(0, 5, 0, 10), generate_mapped_pred(0, 10, 10, 20)), 0.5), ], @@ -317,6 +347,7 @@ def test_manhatan_distance_between_points(input, expected): distance = Positioning.manhattan_distance_between_points(input[0], input[1]) assert round(distance, 2) == expected + def test_get_tokens_within_bounds(bbox_token_page): box = generate_mapped_pred(300, 360, 290, 450, page_num=0) positioning = Positioning() @@ -324,26 +355,32 @@ def test_get_tokens_within_bounds(bbox_token_page): assert len(bounds) == 1 assert "true" in bounds[0]["text"] + def test_get_tokens_within_bounds_excludes_overlap(bbox_token_page): box = generate_mapped_pred(300, 360, 290, 450, page_num=0) positioning = Positioning() bounds = positioning.get_tokens_within_bounds(box, bbox_token_page) assert "false" not in bounds[0]["text"] and "edge" not in bounds[0]["text"] + def test_get_tokens_within_bounds_includes_overlap(bbox_token_page): box = generate_mapped_pred(300, 360, 290, 450, page_num=0) positioning = Positioning() - edges = positioning.get_tokens_within_bounds(box, bbox_token_page, include_overlap=True) + edges = positioning.get_tokens_within_bounds( + box, bbox_token_page, include_overlap=True + ) assert len(edges) == 2 for token in edges: assert "true" in token["text"] or "edge" in token["text"] -def test_get_tokens_within_bounds_throws_error(bbox_token_page): + +def test_get_tokens_null_bounds(bbox_token_page): null_box = generate_mapped_pred() positioning = 
Positioning() null = positioning.get_tokens_within_bounds(null_box, bbox_token_page) assert null == [] + def test_get_tokens_within_bounds_throws_error(): positioning = Positioning() with pytest.raises(ToolkitInputError): diff --git a/tests/association/test_splitting.py b/tests/association/test_splitting.py index c89df5e4..74be5516 100644 --- a/tests/association/test_splitting.py +++ b/tests/association/test_splitting.py @@ -1,5 +1,3 @@ -import pytest - from indico_toolkit.association import split_prediction diff --git a/tests/conftest.py b/tests/conftest.py index f140319e..6c0b1cf1 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,207 +1,18 @@ -import os -import pytest -from indico.queries import ( - CreateDataset, - AddModelGroupComponent, - NewLabelsetArguments, - GetWorkflow, - GetDataset, - JobStatus, - DocumentExtraction, - RetrieveStorageObject, -) -from indico import IndicoClient -from indico.errors import IndicoRequestError -from indico_toolkit import create_client -from indico_toolkit.indico_wrapper import ( - Workflow, - DocExtraction, -) -from indico_toolkit.structure.create_structure import Structure -from indico_toolkit.structure.utils import ModelTaskType - - -FILE_PATH = os.path.dirname(os.path.abspath(__file__)) - -# The following ENV Variables must be set -HOST_URL = os.environ.get("HOST_URL") -API_TOKEN_PATH = os.environ.get("API_TOKEN_PATH") -API_TOKEN = os.environ.get("API_TOKEN") - -# the following five env variables are associated as part of same extraction workflow based on -# financial disclosure CSV snapshot and associated workflow -DATASET_ID = os.environ.get("DATASET_ID") -WORKFLOW_ID = os.environ.get("WORKFLOW_ID") -TEACH_TASK_ID = os.environ.get("TEACH_TASK_ID") -MODEL_GROUP_ID = os.environ.get("MODEL_GROUP_ID") -MODEL_ID = os.environ.get("MODEL_ID") -MODEL_NAME = os.environ.get("MODEL_NAME", "Solutions Toolkit Test Model") - -PDF_DATASET_ID = os.environ.get("PDF_DATASET_ID") - - -@pytest.fixture(scope="session") -def 
indico_client() -> IndicoClient: - return create_client(HOST_URL, API_TOKEN_PATH, API_TOKEN) - - -@pytest.fixture(scope="session") -def testdir_file_path(): - return FILE_PATH - - -@pytest.fixture(scope="session") -def pdf_filepath(): - return os.path.join(FILE_PATH, "data/samples/fin_disc.pdf") - - -@pytest.fixture(scope="session") -def dataset_obj(indico_client): - if not DATASET_ID: - dataset = indico_client.call( - CreateDataset( - name="Solutions Toolkit Test Dataset", - files=[os.path.join(FILE_PATH, "data/samples/fin_disc_snapshot.csv")], - ) - ) - else: - try: - dataset = indico_client.call(GetDataset(id=DATASET_ID)) - except IndicoRequestError: - raise ValueError( - f"Dataset with ID {DATASET_ID} does not exist or you do not have access to it" - ) - return dataset - - -@pytest.fixture(scope="session") -def workflow_id(indico_client, dataset_obj): - global WORKFLOW_ID - if not WORKFLOW_ID: - structure = Structure(indico_client) - workflow = structure.create_workflow(name="Solutions Toolkit Test Workflow", dataset_id=dataset_obj.id) - target_names = [ - "", - "Asset Value", - "Date of Appointment", - "Department", - "Income Amount", - "Liability Amount", - "Liability Type", - "Name", - "Position", - "Previous Organization", - "Previous Position" - ] - - workflow = structure.add_teach_task( - task_name="Teach Task Name", - labelset_name="Extraction Labelset", - target_names=target_names, - dataset_id=dataset_obj.id, - workflow_id=workflow.id, - model_type="annotation", - data_column="text" - ) - WORKFLOW_ID = workflow.id - else: - try: - indico_client.call(GetWorkflow(workflow_id=WORKFLOW_ID)) - except IndicoRequestError: - raise ValueError( - f"Workflow with ID {WORKFLOW_ID} does not exist or you do not have access to it" - ) - return WORKFLOW_ID - - -@pytest.fixture(scope="session") -def teach_task_id(): - return int(TEACH_TASK_ID) - +from pathlib import Path -@pytest.fixture(scope="session") -def extraction_model_group_id(): - return int(MODEL_GROUP_ID) - 
- -@pytest.fixture(scope="session") -def extraction_model_id(): - return int(MODEL_ID) - - -@pytest.fixture(scope="module") -def module_submission_ids(workflow_id, indico_client, pdf_filepath): - workflow_wrapper = Workflow(indico_client) - sub_ids = workflow_wrapper.submit_documents_to_workflow(workflow_id, [pdf_filepath]) - workflow_wrapper.wait_for_submissions_to_process(sub_ids) - return sub_ids - - -@pytest.fixture(scope="function") -def function_submission_ids(workflow_id, indico_client, pdf_filepath): - workflow_wrapper = Workflow(indico_client) - sub_ids = workflow_wrapper.submit_documents_to_workflow(workflow_id, [pdf_filepath]) - workflow_wrapper.wait_for_submissions_to_process(sub_ids) - return sub_ids - - -@pytest.fixture(scope="module") -def wflow_submission_result(indico_client, module_submission_ids) -> dict: - workflow_wrapper = Workflow(indico_client) - return workflow_wrapper.get_submission_results_from_ids( - [module_submission_ids[0]], - )[0] - - -@pytest.fixture(scope="session") -def model_name(): - return MODEL_NAME - - -@pytest.fixture(scope="session") -def ondoc_ocr_object(indico_client, pdf_filepath): - job = indico_client.call( - DocumentExtraction( - files=[pdf_filepath], json_config={"preset_config": "ondocument"} - ) - ) - job = indico_client.call(JobStatus(id=job[0].id, wait=True)) - extracted_data = indico_client.call(RetrieveStorageObject(job.result)) - return extracted_data - - -@pytest.fixture(scope="session") -def standard_ocr_object(indico_client, pdf_filepath): - # TODO: this can be static-- probably should be "ondoc" as well - job = indico_client.call( - DocumentExtraction( - files=[pdf_filepath], json_config={"preset_config": "standard"} - ) - ) - job = indico_client.call(JobStatus(id=job[0].id, wait=True)) - extracted_data = indico_client.call(RetrieveStorageObject(job.result)) - return extracted_data - - -@pytest.fixture(scope="session") -def doc_extraction_standard(indico_client): - return DocExtraction(indico_client) 
+import pytest @pytest.fixture(scope="session") -def snapshot_csv_path(testdir_file_path): - return os.path.join(testdir_file_path, "data/snapshots/updated_snapshot.csv") +def tests_folder() -> Path: + return Path(__file__).parent @pytest.fixture(scope="session") -def old_snapshot_csv_path(testdir_file_path): - return os.path.join(testdir_file_path, "data/snapshots/snapshot.csv") - +def snapshot_file(tests_folder: Path) -> Path: + return tests_folder / "data/snapshots/updated_snapshot.csv" -@pytest.fixture(scope="session") -def populator_snapshot_csv_path(testdir_file_path): - return os.path.join(testdir_file_path, "data/snapshots/populator_snapshot.csv") @pytest.fixture(scope="session") -def pdf_dataset_obj(indico_client): - return indico_client.call(GetDataset(PDF_DATASET_ID)) +def old_snapshot_file(tests_folder: Path) -> Path: + return tests_folder / "data/snapshots/snapshot.csv" diff --git a/tests/indico_wrapper/test_doc_extraction.py b/tests/indico_wrapper/test_doc_extraction.py deleted file mode 100644 index d76672e6..00000000 --- a/tests/indico_wrapper/test_doc_extraction.py +++ /dev/null @@ -1,59 +0,0 @@ -from indico_toolkit.ocr import StandardOcr, OnDoc -from indico_toolkit.indico_wrapper import DocExtraction - - -def test_run_ocr_ondoc(indico_client, pdf_filepath): - doc_extraction_ondoc = DocExtraction(indico_client, preset_config="ondocument") - extracted_data = doc_extraction_ondoc.run_ocr(filepaths=[pdf_filepath]) - for item in extracted_data: - assert isinstance(item, OnDoc) - - -def test_run_ocr_standard(doc_extraction_standard, pdf_filepath): - extracted_data = doc_extraction_standard.run_ocr(filepaths=[pdf_filepath]) - for item in extracted_data: - assert isinstance(item, StandardOcr) - - -def test_run_ocr_standard_full_text(doc_extraction_standard, pdf_filepath): - full_text_result = doc_extraction_standard.run_ocr( - filepaths=[pdf_filepath], text_setting="full_text" - ) - assert len(full_text_result[0]) == 2062 - - -def 
test_run_ocr_standard_page_texts(doc_extraction_standard, pdf_filepath): - page_texts_result = doc_extraction_standard.run_ocr( - filepaths=[pdf_filepath], text_setting="page_texts" - ) - assert len(page_texts_result[0][0]) == 1153 - - -def test_run_ocr_custom_full_text(indico_client, pdf_filepath): - doc_extraction_custom = DocExtraction(indico_client, custom_config={ - "top_level": "page", - "nest": False, - "reblocking": ["style", "list", "inline-header"], - "pages": ["text", "size", "dpi", "doc_offset", "page_num", "image", "thumbnail"], - "blocks": ["text", "doc_offset", "page_offset", "position", "block_type", "page_num"], - "tokens": ["text", "doc_offset", "page_offset", "block_offset", "position", "page_num", "style"], - "chars": ["text", "doc_index", "block_index", "page_index", "page_num", "position"]}) - full_text_result = doc_extraction_custom.run_ocr( - filepaths=[pdf_filepath], text_setting="full_text" - ) - assert len(full_text_result[0]) == 2067 - - -def test_run_ocr_custom_page_texts(indico_client, pdf_filepath): - doc_extraction_custom = DocExtraction(indico_client, custom_config={ - "top_level": "page", - "nest": False, - "reblocking": ["style", "list", "inline-header"], - "pages": ["text", "size", "dpi", "doc_offset", "page_num", "image", "thumbnail"], - "blocks": ["text", "doc_offset", "page_offset", "position", "block_type", "page_num"], - "tokens": ["text", "doc_offset", "page_offset", "block_offset", "position", "page_num", "style"], - "chars": ["text", "doc_index", "block_index", "page_index", "page_num", "position"]}) - page_texts_result = doc_extraction_custom.run_ocr( - filepaths=[pdf_filepath], text_setting="page_texts" - ) - assert len(page_texts_result[0][0]) == 1158 diff --git a/tests/indico_wrapper/test_download.py b/tests/indico_wrapper/test_download.py deleted file mode 100644 index 5ae2bb25..00000000 --- a/tests/indico_wrapper/test_download.py +++ /dev/null @@ -1,37 +0,0 @@ -import pytest -import pandas as pd -import tempfile 
-import os -from indico.types import Dataset -from indico_toolkit.indico_wrapper import Download, Datasets - - -@pytest.fixture(scope="module") -def downloader(indico_client): - return Download(indico_client) - - -@pytest.fixture(scope="module") -def snap_dset_id(dataset_obj): - return dataset_obj.id - - -def test_get_uploaded_csv_dataframe(downloader: Download, snap_dset_id: int): - df = downloader.get_uploaded_csv_dataframe(snap_dset_id) - assert isinstance(df, pd.DataFrame) - assert "question_1620" in df.columns, "Missing column from uploaded CSV" - - -def test_download_export(downloader: Download, dataset_obj: Dataset): - df = downloader.get_snapshot_dataframe(dataset_obj.id, dataset_obj.labelsets[0].id) - assert isinstance(df, pd.DataFrame) - assert isinstance(df["text"][0], str) - - -def test_download_pdfs(downloader: Download, pdf_dataset_obj: Dataset): - with tempfile.TemporaryDirectory() as tmpdir: - num_files = downloader.get_dataset_pdfs( - pdf_dataset_obj.id, pdf_dataset_obj.labelsets[0].id, tmpdir, max_files_to_download=1 - ) - num_files_downloaded = len(os.listdir(tmpdir)) - assert num_files == num_files_downloaded diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 00000000..b0c3d356 --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1,210 @@ +from pathlib import Path + +import pytest +from indico import IndicoClient, IndicoConfig +from indico.queries import ( + AddModelGroupComponent, + CreateDataset, + CreateWorkflow, + DocumentExtraction, + GetTrainingModelWithProgress, + GraphQLRequest, + JobStatus, + RetrieveStorageObject, +) + +from indico_toolkit.indico_wrapper import DocExtraction, Workflow + + +def pytest_addoption(parser: pytest.Parser) -> None: + parser.addoption( + "--host", + required=True, + help="Specify the host URL for integration tests", + ) + parser.addoption( + "--token", + required=True, + help="Specify the API token (string or path) for integration tests", + ) + + 
+@pytest.fixture(scope="session") +def host(request: pytest.FixtureRequest) -> str: + return request.config.getoption("--host") + + +@pytest.fixture(scope="session") +def token(request: pytest.FixtureRequest) -> str: + return request.config.getoption("--token") + + +@pytest.fixture(scope="session") +def indico_client(host: str, token: str) -> IndicoClient: + if Path(token).is_file(): + return IndicoClient(IndicoConfig(host=host, api_token_path=token)) + else: + return IndicoClient(IndicoConfig(host=host, api_token=token)) + + +@pytest.fixture(scope="session") +def dataset(indico_client, tests_folder): + return indico_client.call( + CreateDataset( + name="Toolkit Integration Tests", + files=[tests_folder / "data/samples/fin_disc_snapshot.csv"], + ) + ) + + +@pytest.fixture(scope="session") +def dataset_id(dataset): + return dataset.id + + +@pytest.fixture(scope="session") +def doc_extraction_standard(indico_client): + return DocExtraction(indico_client) + + +@pytest.fixture(scope="session") +def extraction_model_group_id(workflow): + return workflow.components[-1].model_group.id + + +@pytest.fixture(scope="session") +def extraction_model_id(workflow): + return workflow.components[-1].model_group.selected_model.id + + +@pytest.fixture(scope="function") +def function_submission_ids(workflow_id, indico_client, pdf_file): + workflow_wrapper = Workflow(indico_client) + sub_ids = workflow_wrapper.submit_documents_to_workflow( + workflow_id, files=[pdf_file] + ) + workflow_wrapper.wait_for_submissions_to_process(sub_ids) + return sub_ids + + +@pytest.fixture(scope="session") +def model_name(workflow): + return workflow.components[-1].model_group.name + + +@pytest.fixture(scope="module") +def module_submission_ids(workflow_id, indico_client, pdf_file): + workflow_wrapper = Workflow(indico_client) + sub_ids = workflow_wrapper.submit_documents_to_workflow( + workflow_id, files=[pdf_file] + ) + workflow_wrapper.wait_for_submissions_to_process(sub_ids) + return sub_ids + + 
+@pytest.fixture(scope="session") +def ondoc_ocr_object(indico_client, pdf_file): + job = indico_client.call( + DocumentExtraction( + files=[pdf_file], json_config={"preset_config": "ondocument"} + ) + ) + job = indico_client.call(JobStatus(id=job[0].id, wait=True)) + extracted_data = indico_client.call(RetrieveStorageObject(job.result)) + return extracted_data + + +@pytest.fixture(scope="session") +def pdf_file(tests_folder: Path) -> Path: + return tests_folder / "data/samples/fin_disc.pdf" + + +@pytest.fixture(scope="session") +def populator_snapshot_file(tests_folder: Path) -> Path: + return tests_folder / "data/snapshots/populator_snapshot.csv" + + +@pytest.fixture(scope="session") +def standard_ocr_object(indico_client, pdf_file): + # TODO: this can be static-- probably should be "ondoc" as well + job = indico_client.call( + DocumentExtraction(files=[pdf_file], json_config={"preset_config": "standard"}) + ) + job = indico_client.call(JobStatus(id=job[0].id, wait=True)) + extracted_data = indico_client.call(RetrieveStorageObject(job.result)) + return extracted_data + + +@pytest.fixture(scope="session") +def teach_task_id(workflow): + return workflow.components[-1].model_group.questionnaire_id + + +@pytest.fixture(scope="module") +def wflow_submission_result(indico_client, module_submission_ids): + workflow_wrapper = Workflow(indico_client) + return workflow_wrapper.get_submission_results_from_ids( + [module_submission_ids[0]], + )[0] + + +@pytest.fixture(scope="session") +def workflow(indico_client, dataset): + workflow = indico_client.call( + CreateWorkflow( + dataset.id, + name="Toolkit Integration Tests", + ) + ) + workflow = indico_client.call( + AddModelGroupComponent( + workflow_id=workflow.id, + dataset_id=dataset.id, + name="Toolkit Integration Tests", + source_column_id=dataset.datacolumn_by_name("text").id, + labelset_column_id=dataset.labelset_by_name("question_1620").id, + after_component_id=workflow.components[0].id, + ) + ) + + while True: + 
training = indico_client.call( + GetTrainingModelWithProgress(workflow.components[-1].model_group.id) + ) + + if training.status not in ("CREATED", "TRAINING"): + break + + indico_client.call( + GraphQLRequest( + """ + mutation AddWorkflowComponent( + $component: JSONString! + $workflowId: Int! + $afterComponentId: Int + ) { + addWorkflowComponent( + component: $component + workflowId: $workflowId + afterComponentId: $afterComponentId + ) { + workflow { + id + } + } + } + """, + { + "component": '{"component_type": "default_output", "config": {}}', + "workflowId": workflow.id, + "afterComponentId": workflow.components[1].id, + }, + ) + ) + + return workflow + + +@pytest.fixture(scope="session") +def workflow_id(workflow): + return workflow.id diff --git a/tests/indico_wrapper/__init__.py b/tests/integration/indico_wrapper/__init__.py similarity index 100% rename from tests/indico_wrapper/__init__.py rename to tests/integration/indico_wrapper/__init__.py diff --git a/tests/indico_wrapper/test_dataset.py b/tests/integration/indico_wrapper/test_dataset.py similarity index 72% rename from tests/indico_wrapper/test_dataset.py rename to tests/integration/indico_wrapper/test_dataset.py index 16ed69d5..2b785071 100644 --- a/tests/indico_wrapper/test_dataset.py +++ b/tests/integration/indico_wrapper/test_dataset.py @@ -1,28 +1,25 @@ """ Test Datasets class methods """ + import pytest -from indico_toolkit.indico_wrapper import Datasets from indico.types import Dataset +from indico_toolkit.indico_wrapper import Datasets + @pytest.fixture(scope="module") def dataset_wrapper(indico_client): return Datasets(indico_client) -@pytest.fixture(scope="module") -def dataset_id(dataset_obj): - return dataset_obj.id - - def test_get_dataset(dataset_wrapper, dataset_id): dataset = dataset_wrapper.get_dataset(dataset_id) assert isinstance(dataset, Dataset) -def test_add_to_dataset(dataset_wrapper, dataset_id, pdf_filepath): - dataset = dataset_wrapper.add_files_to_dataset(dataset_id, 
filepaths=[pdf_filepath]) +def test_add_to_dataset(dataset_wrapper, dataset_id, pdf_file): + dataset = dataset_wrapper.add_files_to_dataset(dataset_id, filepaths=[pdf_file]) assert isinstance(dataset, Dataset) for f in dataset.files: assert f.status in ["PROCESSED", "FAILED"] @@ -34,11 +31,10 @@ def test_get_dataset_files(dataset_wrapper, dataset_id): assert len(files_list) > 0 -def test_create_delete_dataset(dataset_wrapper, pdf_filepath): +def test_create_delete_dataset(dataset_wrapper, pdf_file): dataset = dataset_wrapper.create_dataset( - filepaths=[pdf_filepath], dataset_name="Temporary Test Dataset" + filepaths=[pdf_file], dataset_name="Toolkit Integration Tests" ) assert isinstance(dataset, Dataset) status = dataset_wrapper.delete_dataset(dataset.id) - assert status == True - + assert status diff --git a/tests/integration/indico_wrapper/test_doc_extraction.py b/tests/integration/indico_wrapper/test_doc_extraction.py new file mode 100644 index 00000000..7917f68d --- /dev/null +++ b/tests/integration/indico_wrapper/test_doc_extraction.py @@ -0,0 +1,127 @@ +from indico_toolkit.indico_wrapper import DocExtraction +from indico_toolkit.ocr import OnDoc, StandardOcr + + +def test_run_ocr_ondoc(indico_client, pdf_file): + doc_extraction_ondoc = DocExtraction(indico_client, preset_config="ondocument") + extracted_data = doc_extraction_ondoc.run_ocr(filepaths=[pdf_file]) + for item in extracted_data: + assert isinstance(item, OnDoc) + + +def test_run_ocr_standard(doc_extraction_standard, pdf_file): + extracted_data = doc_extraction_standard.run_ocr(filepaths=[pdf_file]) + for item in extracted_data: + assert isinstance(item, StandardOcr) + + +def test_run_ocr_standard_full_text(doc_extraction_standard, pdf_file): + full_text_result = doc_extraction_standard.run_ocr( + filepaths=[pdf_file], text_setting="full_text" + ) + assert len(full_text_result[0]) == 2062 + + +def test_run_ocr_standard_page_texts(doc_extraction_standard, pdf_file): + page_texts_result = 
doc_extraction_standard.run_ocr( + filepaths=[pdf_file], text_setting="page_texts" + ) + assert len(page_texts_result[0][0]) == 1153 + + +def test_run_ocr_custom_full_text(indico_client, pdf_file): + doc_extraction_custom = DocExtraction( + indico_client, + custom_config={ + "top_level": "page", + "nest": False, + "reblocking": ["style", "list", "inline-header"], + "pages": [ + "text", + "size", + "dpi", + "doc_offset", + "page_num", + "image", + "thumbnail", + ], + "blocks": [ + "text", + "doc_offset", + "page_offset", + "position", + "block_type", + "page_num", + ], + "tokens": [ + "text", + "doc_offset", + "page_offset", + "block_offset", + "position", + "page_num", + "style", + ], + "chars": [ + "text", + "doc_index", + "block_index", + "page_index", + "page_num", + "position", + ], + }, + ) + full_text_result = doc_extraction_custom.run_ocr( + filepaths=[pdf_file], text_setting="full_text" + ) + assert len(full_text_result[0]) == 2067 + + +def test_run_ocr_custom_page_texts(indico_client, pdf_file): + doc_extraction_custom = DocExtraction( + indico_client, + custom_config={ + "top_level": "page", + "nest": False, + "reblocking": ["style", "list", "inline-header"], + "pages": [ + "text", + "size", + "dpi", + "doc_offset", + "page_num", + "image", + "thumbnail", + ], + "blocks": [ + "text", + "doc_offset", + "page_offset", + "position", + "block_type", + "page_num", + ], + "tokens": [ + "text", + "doc_offset", + "page_offset", + "block_offset", + "position", + "page_num", + "style", + ], + "chars": [ + "text", + "doc_index", + "block_index", + "page_index", + "page_num", + "position", + ], + }, + ) + page_texts_result = doc_extraction_custom.run_ocr( + filepaths=[pdf_file], text_setting="page_texts" + ) + assert len(page_texts_result[0][0]) == 1158 diff --git a/tests/integration/indico_wrapper/test_download.py b/tests/integration/indico_wrapper/test_download.py new file mode 100644 index 00000000..0bc52e75 --- /dev/null +++ 
b/tests/integration/indico_wrapper/test_download.py @@ -0,0 +1,36 @@ +import os +import tempfile + +import pytest +from indico.types import Dataset + +from indico_toolkit.indico_wrapper import Download + +pd = pytest.importorskip("pandas") + + +@pytest.fixture(scope="module") +def downloader(indico_client): + return Download(indico_client) + + +def test_get_uploaded_csv_dataframe(downloader: Download, dataset_id: int): + df = downloader.get_uploaded_csv_dataframe(dataset_id) + assert isinstance(df, pd.DataFrame) + assert "question_1620" in df.columns, "Missing column from uploaded CSV" + + +def test_download_export(downloader: Download, dataset: Dataset): + df = downloader.get_snapshot_dataframe(dataset.id, dataset.labelsets[0].id) + assert isinstance(df, pd.DataFrame) + assert isinstance(df["text"][0], str) + + +@pytest.mark.skip(reason="first file is not guaranteed to be a PDF") +def test_download_pdfs(downloader: Download, dataset: Dataset): + with tempfile.TemporaryDirectory() as tmpdir: + num_files = downloader.get_dataset_pdfs( + dataset.id, dataset.labelsets[0].id, tmpdir, max_files_to_download=1 + ) + num_files_downloaded = len(os.listdir(tmpdir)) + assert num_files == num_files_downloaded diff --git a/tests/indico_wrapper/test_indico_wrapper.py b/tests/integration/indico_wrapper/test_indico_wrapper.py similarity index 73% rename from tests/indico_wrapper/test_indico_wrapper.py rename to tests/integration/indico_wrapper/test_indico_wrapper.py index 75310bd5..13141027 100644 --- a/tests/indico_wrapper/test_indico_wrapper.py +++ b/tests/integration/indico_wrapper/test_indico_wrapper.py @@ -11,8 +11,8 @@ def indico_wrapper(indico_client): @pytest.fixture(scope="module") -def storage_url(indico_wrapper, pdf_filepath): - return indico_wrapper.create_storage_urls([pdf_filepath])[0] +def storage_url(indico_wrapper, pdf_file): + return indico_wrapper.create_storage_urls([pdf_file])[0] def test_get_storage_object(indico_wrapper, storage_url): @@ -25,7 +25,7 @@ def 
test_get_storage_object_retry(indico_wrapper, storage_url): _ = indico_wrapper.get_storage_object(storage_url + "bad") -def test_graphQL_request(indico_wrapper, dataset_obj): +def test_graphQL_request(indico_wrapper, dataset): query = """ query getSharknadoDataset($id: Int!) { dataset(id: $id) { @@ -34,20 +34,22 @@ def test_graphQL_request(indico_wrapper, dataset_obj): } } """ - response = indico_wrapper.graphQL_request(query, {"id": dataset_obj.id}) - assert response["dataset"]["id"] == int(dataset_obj.id) + response = indico_wrapper.graphQL_request(query, {"id": dataset.id}) + assert response["dataset"]["id"] == int(dataset.id) assert response["dataset"]["status"] == "COMPLETE" def test_get_predictions_with_model_id(indico_wrapper, extraction_model_id): sample_text = ["Some random sample text written by Scott Levin from Indico"] - result = indico_wrapper.get_predictions_with_model_id(extraction_model_id, sample_text) + result = indico_wrapper.get_predictions_with_model_id( + extraction_model_id, sample_text + ) assert isinstance(result, list) assert len(result) == 1 assert isinstance(result[0], Extractions) -def test_create_storage_urls(indico_wrapper, pdf_filepath): - storage_urls = indico_wrapper.create_storage_urls([pdf_filepath]) +def test_create_storage_urls(indico_wrapper, pdf_file): + storage_urls = indico_wrapper.create_storage_urls([pdf_file]) assert len(storage_urls) == 1 assert isinstance(storage_urls[0], str) diff --git a/tests/indico_wrapper/test_modelops.py b/tests/integration/indico_wrapper/test_modelops.py similarity index 98% rename from tests/indico_wrapper/test_modelops.py rename to tests/integration/indico_wrapper/test_modelops.py index 8d399db8..555c7d77 100644 --- a/tests/indico_wrapper/test_modelops.py +++ b/tests/integration/indico_wrapper/test_modelops.py @@ -1,8 +1,9 @@ """ Test Model Ops class methods """ + import pytest -import json + from indico_toolkit.indico_wrapper.modelop import ModelOp @@ -58,7 +59,7 @@ def 
test_invalid_parameter(extraction_model_group_id, modelop, params): _ = modelop.update_model_settings( model_group_id=extraction_model_group_id, model_type="text_extraction", - **params + **params, ) diff --git a/tests/indico_wrapper/test_reviewer.py b/tests/integration/indico_wrapper/test_reviewer.py similarity index 86% rename from tests/indico_wrapper/test_reviewer.py rename to tests/integration/indico_wrapper/test_reviewer.py index 14235de1..9b26a4ce 100644 --- a/tests/indico_wrapper/test_reviewer.py +++ b/tests/integration/indico_wrapper/test_reviewer.py @@ -1,10 +1,10 @@ import pytest + from indico_toolkit.indico_wrapper import Reviewer, Workflow -from indico.queries import GetSubmission @pytest.fixture(scope="module") -def submissions_awaiting_review(workflow_id, indico_client, pdf_filepath): +def submissions_awaiting_review(workflow_id, indico_client, pdf_file): """ Ensure that auto review is turned off and there are two submissions "PENDING_REVIEW" """ @@ -13,7 +13,7 @@ def submissions_awaiting_review(workflow_id, indico_client, pdf_filepath): workflow_id, enable_review=True, enable_auto_review=False ) sub_ids = workflow_wrapper.submit_documents_to_workflow( - workflow_id, [pdf_filepath, pdf_filepath] + workflow_id, files=[pdf_file, pdf_file] ) workflow_wrapper.wait_for_submissions_to_process(sub_ids) @@ -22,8 +22,10 @@ def get_change_formatted_predictions(workflow_result): """ Helper function for get change format for accepted predictions in test_accept_review """ - return {workflow_result.model_name: workflow_result.predictions.to_list()} + return {workflow_result.model_name: workflow_result.get_predictions.to_list()} + +@pytest.mark.skip(reason="broken on indico-client>=6.1.0") def test_accept_review(submissions_awaiting_review, indico_client, workflow_id): reviewer_wrapper = Reviewer(indico_client, workflow_id) id_in_review = reviewer_wrapper.get_random_review_id() @@ -35,7 +37,8 @@ def test_accept_review(submissions_awaiting_review, indico_client, 
workflow_id): submission = reviewer_wrapper.get_submission_object(id_in_review) assert submission.status == "COMPLETE" -@pytest.mark.dependency() + +@pytest.mark.skip(reason="flaky, depends on submission processing time") def test_reject_from_review(submissions_awaiting_review, indico_client, workflow_id): reviewer_wrapper = Reviewer(indico_client, workflow_id) id_in_review = reviewer_wrapper.get_random_review_id() @@ -43,7 +46,8 @@ def test_reject_from_review(submissions_awaiting_review, indico_client, workflow submission = reviewer_wrapper.get_submission_object(id_in_review) assert submission.status == "PENDING_ADMIN_REVIEW" -@pytest.mark.dependency(depends=["test_reject_from_review"]) + +@pytest.mark.skip(reason="flaky, depends on submission processing time") def test_reject_from_admin_review( submissions_awaiting_review, indico_client, workflow_id ): diff --git a/tests/indico_wrapper/test_workflow.py b/tests/integration/indico_wrapper/test_workflow.py similarity index 76% rename from tests/indico_wrapper/test_workflow.py rename to tests/integration/indico_wrapper/test_workflow.py index 781f97f7..97128d6c 100644 --- a/tests/indico_wrapper/test_workflow.py +++ b/tests/integration/indico_wrapper/test_workflow.py @@ -1,21 +1,20 @@ -from indico_toolkit.types.extractions import Extractions -from indico import IndicoClient -from indico.types import Submission, Job -from tests.conftest import MODEL_NAME +import pytest +from indico.types import Submission + from indico_toolkit.indico_wrapper import Workflow from indico_toolkit.ocr import OnDoc -from indico_toolkit.types import WorkflowResult, Predictions +from indico_toolkit.types import WorkflowResult +from indico_toolkit.types.extractions import Extractions -def test_submit_documents_to_workflow(indico_client, pdf_filepath, workflow_id): +def test_submit_documents_to_workflow(indico_client, pdf_file, workflow_id): wflow = Workflow(indico_client) - sub_ids = wflow.submit_documents_to_workflow( - 
workflow_id=workflow_id, pdf_filepaths=[pdf_filepath] - ) + sub_ids = wflow.submit_documents_to_workflow(workflow_id, files=[pdf_file]) assert len(sub_ids) == 1 assert isinstance(sub_ids[0], int) +@pytest.mark.skip(reason="relies on deprecated v1 result file format") def test_get_ondoc_ocr_from_etl_url(indico_client, wflow_submission_result): wflow = Workflow(indico_client) on_doc = wflow.get_ondoc_ocr_from_etl_url(wflow_submission_result.etl_url) @@ -42,8 +41,9 @@ def test_get_submission_object(indico_client, module_submission_ids): assert isinstance(sub, Submission) +@pytest.mark.skip(reason="broken on indico-client>=6.1.0") def test_get_submission_results_from_ids(indico_client, module_submission_ids): wflow = Workflow(indico_client) result = wflow.get_submission_results_from_ids([module_submission_ids[0]])[0] assert isinstance(result, WorkflowResult) - assert isinstance(result.predictions, Extractions) + assert isinstance(result.get_predictions, Extractions) diff --git a/tests/ocr/__init__.py b/tests/integration/metrics/__init__.py similarity index 100% rename from tests/ocr/__init__.py rename to tests/integration/metrics/__init__.py diff --git a/tests/metrics/test_compare_models.py b/tests/integration/metrics/test_compare_models.py similarity index 97% rename from tests/metrics/test_compare_models.py rename to tests/integration/metrics/test_compare_models.py index 6577a9b2..46f845b3 100644 --- a/tests/metrics/test_compare_models.py +++ b/tests/integration/metrics/test_compare_models.py @@ -1,9 +1,9 @@ import pytest -import tempfile -import pandas as pd from indico_toolkit.metrics import CompareModels +pd = pytest.importorskip("pandas") + def test_get_data_df(extraction_model_group_id, extraction_model_id, indico_client): comp = CompareModels( diff --git a/tests/metrics/test_metrics.py b/tests/integration/metrics/test_metrics.py similarity index 98% rename from tests/metrics/test_metrics.py rename to tests/integration/metrics/test_metrics.py index 
8c066722..a13b169e 100644 --- a/tests/metrics/test_metrics.py +++ b/tests/integration/metrics/test_metrics.py @@ -1,9 +1,11 @@ -import pytest import tempfile -import pandas as pd + +import pytest from indico_toolkit.metrics import ExtractionMetrics +pd = pytest.importorskip("pandas") + def test_get_metrics(extraction_model_group_id, indico_client): metrics = ExtractionMetrics(indico_client) diff --git a/tests/integration/ocr/__init__.py b/tests/integration/ocr/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/ocr/test_customocr_object.py b/tests/integration/ocr/test_customocr_object.py similarity index 70% rename from tests/ocr/test_customocr_object.py rename to tests/integration/ocr/test_customocr_object.py index 0754c2fb..078ca7f2 100644 --- a/tests/ocr/test_customocr_object.py +++ b/tests/integration/ocr/test_customocr_object.py @@ -1,14 +1,15 @@ import pytest + from indico_toolkit.indico_wrapper import DocExtraction -def test_full_text(indico_client, pdf_filepath): +def test_full_text(indico_client, pdf_file): doc_extraction = DocExtraction(indico_client, preset_config="simple") - custom_ocr = doc_extraction.run_ocr(filepaths=[pdf_filepath]) + custom_ocr = doc_extraction.run_ocr(filepaths=[pdf_file]) assert len(custom_ocr[0].full_text) == 2823 -def test_full_text_exception(indico_client, pdf_filepath): +def test_full_text_exception(indico_client, pdf_file): doc_extraction = DocExtraction( indico_client, custom_config={ @@ -18,12 +19,12 @@ def test_full_text_exception(indico_client, pdf_filepath): "blocks": ["text", "position", "doc_offset", "page_offset"], }, ) - custom_ocr = doc_extraction.run_ocr(filepaths=[pdf_filepath]) + custom_ocr = doc_extraction.run_ocr(filepaths=[pdf_file]) with pytest.raises(Exception): custom_ocr[0].full_text -def test_page_texts(indico_client, pdf_filepath): +def test_page_texts(indico_client, pdf_file): doc_extraction = DocExtraction( indico_client, custom_config={ @@ -34,13 +35,13 @@ def 
test_page_texts(indico_client, pdf_filepath): "blocks": ["text", "position", "doc_offset", "page_offset"], }, ) - custom_ocr = doc_extraction.run_ocr(filepaths=[pdf_filepath]) + custom_ocr = doc_extraction.run_ocr(filepaths=[pdf_file]) assert isinstance(custom_ocr[0].page_texts, list) assert isinstance(custom_ocr[0].page_texts[0], str) -def test_page_texts_exception(indico_client, pdf_filepath): +def test_page_texts_exception(indico_client, pdf_file): doc_extraction = DocExtraction(indico_client, preset_config="legacy") - custom_ocr = doc_extraction.run_ocr(filepaths=[pdf_filepath]) + custom_ocr = doc_extraction.run_ocr(filepaths=[pdf_file]) with pytest.raises(Exception): custom_ocr.page_texts diff --git a/tests/ocr/test_ondoc_object.py b/tests/integration/ocr/test_ondoc_object.py similarity index 99% rename from tests/ocr/test_ondoc_object.py rename to tests/integration/ocr/test_ondoc_object.py index 8ab16597..e5f49c6b 100644 --- a/tests/ocr/test_ondoc_object.py +++ b/tests/integration/ocr/test_ondoc_object.py @@ -1,4 +1,5 @@ import pytest + from indico_toolkit.ocr import OnDoc diff --git a/tests/ocr/test_standard_object.py b/tests/integration/ocr/test_standard_object.py similarity index 100% rename from tests/ocr/test_standard_object.py rename to tests/integration/ocr/test_standard_object.py diff --git a/tests/auto_review/test_auto_review.py b/tests/integration/test_auto_review.py similarity index 71% rename from tests/auto_review/test_auto_review.py rename to tests/integration/test_auto_review.py index b12ce21e..88aa7eef 100644 --- a/tests/auto_review/test_auto_review.py +++ b/tests/integration/test_auto_review.py @@ -1,22 +1,20 @@ -import os import json -import pytest import os -import json from collections import defaultdict + +import pytest from indico.queries import Job -from indico_toolkit.indico_wrapper import Workflow -from indico_toolkit.auto_review import AutoReviewFunction, AutoReviewer + +from indico_toolkit.auto_review import AutoReviewer, 
AutoReviewFunction from indico_toolkit.auto_review.auto_review_functions import ( + accept_by_all_match_and_confidence, accept_by_confidence, reject_by_confidence, - accept_by_all_match_and_confidence, - remove_by_confidence, reject_by_max_character_length, - reject_by_min_character_length + reject_by_min_character_length, + remove_by_confidence, ) -from tests.conftest import FILE_PATH - +from indico_toolkit.indico_wrapper import Workflow min_max_length = 6 ACCEPTED = "accepted" @@ -24,55 +22,62 @@ @pytest.fixture(scope="session") -def auto_review_preds(testdir_file_path): - with open(os.path.join(testdir_file_path, "data/auto_review/preds.json"), "r") as f: +def auto_review_preds(tests_folder): + with open(os.path.join(tests_folder, "data/auto_review/preds.json"), "r") as f: preds = json.load(f) return preds @pytest.fixture(scope="function") -def id_pending_scripted(workflow_id, indico_client, pdf_filepath): +def id_pending_scripted(workflow_id, indico_client, pdf_file): """ Ensure that auto review is turned on and there are two submissions "PENDING_REVIEW" """ wflow = Workflow(indico_client) wflow.update_workflow_settings( - workflow_id, enable_review=True, enable_auto_review=True, + workflow_id, enable_review=True, enable_auto_review=True ) - sub_id = wflow.submit_documents_to_workflow(workflow_id, [pdf_filepath]) + sub_id = wflow.submit_documents_to_workflow(workflow_id, files=[pdf_file]) wflow.wait_for_submissions_to_process(sub_id) return sub_id[0] +@pytest.mark.skip(reason="broken on indico-client>=6.1.0") def test_submit_submission_review( indico_client, id_pending_scripted, wflow_submission_result, model_name ): wflow = Workflow(indico_client) job = wflow.submit_submission_review( - id_pending_scripted, {model_name: wflow_submission_result.predictions.to_list()} + id_pending_scripted, + {model_name: wflow_submission_result.get_predictions.to_list()}, ) assert isinstance(job, Job) +@pytest.mark.skip(reason="broken on indico-client>=6.1.0") def 
test_submit_auto_review(indico_client, id_pending_scripted, model_name): """ - Submit a document to a workflow, auto review the predictions, and retrieve the results + Submit a document to a workflow, auto review the predictions, and retrieve the + results """ # Submit to workflow and get predictions wflow = Workflow(indico_client) result = wflow.get_submission_results_from_ids([id_pending_scripted])[0] - predictions = result.predictions.to_list() + predictions = result.get_predictions.to_list() # Review the submission functions = [ AutoReviewFunction(accept_by_confidence, kwargs={"conf_threshold": 0.99}), AutoReviewFunction( - reject_by_min_character_length, + reject_by_min_character_length, labels=["Liability Amount", "Date of Appointment"], - kwargs={"min_length_threshold": 3}), + kwargs={"min_length_threshold": 3}, + ), ] reviewer = AutoReviewer(predictions, functions) reviewer.apply_reviews() - non_rejected_pred_count = len([i for i in reviewer.updated_predictions if "rejected" not in i]) + non_rejected_pred_count = len( + [i for i in reviewer.updated_predictions if "rejected" not in i] + ) wflow.submit_submission_review( id_pending_scripted, {model_name: reviewer.updated_predictions} ) @@ -83,7 +88,7 @@ def test_submit_auto_review(indico_client, id_pending_scripted, model_name): def accept_if_match(predictions, labels: list = None, match_text: str = ""): for pred in predictions: if REJECTED not in pred: - if labels != None and pred["label"] not in labels: + if labels is not None and pred["label"] not in labels: continue if pred["text"] == match_text: pred["accepted"] = True @@ -91,7 +96,7 @@ def accept_if_match(predictions, labels: list = None, match_text: str = ""): def create_pred_label_map(predictions): - """ + """ Create dict with labels keying to list of predictions with that label """ prediction_label_map = defaultdict(list) @@ -104,74 +109,71 @@ def create_pred_label_map(predictions): def test_reviewer(auto_review_preds): custom_functions = [ 
AutoReviewFunction( - reject_by_confidence, + reject_by_confidence, labels=["reject_by_confidence"], - kwargs={"conf_threshold": 0.7} + kwargs={"conf_threshold": 0.7}, ), AutoReviewFunction( accept_by_all_match_and_confidence, - labels = [ - "accept_by_all_match_and_confidence", - "no_match_accept_by_all_match_and_confidence", - "low_conf_accept_by_all_match_and_confidence" + labels=[ + "accept_by_all_match_and_confidence", + "no_match_accept_by_all_match_and_confidence", + "low_conf_accept_by_all_match_and_confidence", ], - kwargs={"conf_threshold": 0.9} + kwargs={"conf_threshold": 0.9}, ), AutoReviewFunction( accept_by_confidence, - labels=[ - "accept_by_confidence", - "reject_by_confidence" - ], - kwargs={"conf_threshold": 0.8} + labels=["accept_by_confidence", "reject_by_confidence"], + kwargs={"conf_threshold": 0.8}, ), AutoReviewFunction( remove_by_confidence, labels=["remove_by_confidence"], - kwargs={"conf_threshold": 0.8} + kwargs={"conf_threshold": 0.8}, ), AutoReviewFunction( reject_by_min_character_length, labels=["reject_by_min_character_length"], - kwargs={"min_length_threshold": 6} + kwargs={"min_length_threshold": 6}, ), AutoReviewFunction( reject_by_max_character_length, labels=["reject_by_max_character_length"], - kwargs={"max_length_threshold": 6} + kwargs={"max_length_threshold": 6}, ), AutoReviewFunction( accept_if_match, labels=["accept_if_match"], - kwargs={"match_text": "matching text"} - ) + kwargs={"match_text": "matching text"}, + ), ] - + reviewer = AutoReviewer(auto_review_preds, custom_functions) reviewer.apply_reviews() preds = reviewer.updated_predictions pred_map = create_pred_label_map(preds) for pred in pred_map["accept_by_all_match_and_confidence"]: - assert pred[ACCEPTED] == True + assert pred[ACCEPTED] for pred in pred_map["low_conf_accept_by_all_match_and_confidence"]: assert ACCEPTED not in pred for pred in pred_map["no_match_accept_by_all_match_and_confidence"]: assert ACCEPTED not in pred for pred in 
pred_map["reject_by_confidence"]: if pred["text"] == "low": - assert pred[REJECTED] == True + assert pred[REJECTED] else: - assert pred[ACCEPTED] == True + assert pred[ACCEPTED] for pred in pred_map["reject_by_min_character_length"]: if len(pred["text"]) < min_max_length: - assert pred[REJECTED] == True + assert pred[REJECTED] else: assert REJECTED not in pred for pred in pred_map["reject_by_max_character_length"]: if len(pred["text"]) > min_max_length: - assert pred[REJECTED] == True + assert pred[REJECTED] else: assert REJECTED not in pred for pred in pred_map["accept_if_match"]: - assert pred["accepted"] == True + assert pred["accepted"] assert "remove_by_confidence" not in pred diff --git a/tests/integration/test_create_client.py b/tests/integration/test_create_client.py new file mode 100644 index 00000000..dcb288c4 --- /dev/null +++ b/tests/integration/test_create_client.py @@ -0,0 +1,17 @@ +from pathlib import Path + +import pytest + +from indico_toolkit import ToolkitAuthError, create_client + + +def test_client_creation(host, token): + if Path(token).is_file(): + create_client(host, token, None) + else: + create_client(host, None, token) + + +def test_client_fail(host): + with pytest.raises(ToolkitAuthError): + create_client(host, api_token_string="not_a_real_token") diff --git a/tests/auto_populate/test_populator.py b/tests/integration/test_populator.py similarity index 64% rename from tests/auto_populate/test_populator.py rename to tests/integration/test_populator.py index 15c157d0..bd2981fd 100644 --- a/tests/auto_populate/test_populator.py +++ b/tests/integration/test_populator.py @@ -1,28 +1,32 @@ +import json import os + import pytest -import time -import json -import pandas as pd from indico.queries import GetWorkflow from indico.types import Workflow -from indico_toolkit.auto_populate.types import LabelInput, LabelInst + from indico_toolkit.auto_populate import AutoPopulator +from indico_toolkit.auto_populate.types import LabelInput, LabelInst + +pd 
= pytest.importorskip("pandas") @pytest.fixture(scope="function") -def static_file_to_targets(populator_snapshot_csv_path): - df = pd.read_csv(populator_snapshot_csv_path) +def static_file_to_targets(populator_snapshot_file): + df = pd.read_csv(populator_snapshot_file) file_to_targets = {} - for file, target in zip(df["file_name_1820"].to_list(), df["Toolkit Test Financial Model"].to_list()): + for file, target in zip( + df["file_name_1820"].to_list(), df["Toolkit Test Financial Model"].to_list() + ): if not isinstance(target, float): file_to_targets[file] = json.loads(target)["targets"] return file_to_targets -def test_create_classification_workflow(indico_client, testdir_file_path): +def test_create_classification_workflow(indico_client, tests_folder): auto_populator = AutoPopulator(indico_client) new_workflow = auto_populator.create_auto_classification_workflow( - os.path.join(testdir_file_path, "data/auto_class"), + os.path.join(tests_folder, "data/auto_class"), "My dataset", "My workflow", "My teach task", @@ -30,24 +34,22 @@ def test_create_classification_workflow(indico_client, testdir_file_path): assert isinstance(new_workflow, Workflow) -def test_create_classification_workflow_too_few_classes( - indico_client, testdir_file_path -): +def test_create_classification_workflow_too_few_classes(indico_client, tests_folder): auto_populator = AutoPopulator(indico_client) with pytest.raises(Exception): auto_populator.create_auto_classification_workflow( - os.path.join(testdir_file_path, "data/auto_class/class_a/"), + os.path.join(tests_folder, "data/auto_class/class_a/"), "My dataset", "My workflow", "My teach task", ) -def test_copy_teach_task(indico_client, dataset_obj, workflow_id, teach_task_id): +def test_copy_teach_task(indico_client, dataset, workflow_id, teach_task_id): auto_populator = AutoPopulator(indico_client) original_workflow = indico_client.call(GetWorkflow(workflow_id)) new_workflow = auto_populator.copy_teach_task( - dataset_id=dataset_obj.id, + 
dataset_id=dataset.id, teach_task_id=teach_task_id, workflow_name=f"{original_workflow.name}_Copied", data_column="text", @@ -56,7 +58,10 @@ def test_copy_teach_task(indico_client, dataset_obj, workflow_id, teach_task_id) def test_get_labels_by_filename( - indico_client, extraction_model_group_id, teach_task_id, static_file_to_targets + indico_client, + extraction_model_group_id, + teach_task_id, + static_file_to_targets, ): populator = AutoPopulator(indico_client) ( @@ -66,12 +71,10 @@ def test_get_labels_by_filename( ) = populator._get_teach_task_details(teach_task_id) labels = populator.get_labels_by_filename( - extraction_model_group_id, - static_file_to_targets, - target_name_map + extraction_model_group_id, static_file_to_targets, target_name_map ) - assert(len(labels) != 0) + assert len(labels) != 0 for label in labels: assert isinstance(label, LabelInput) for target in label.targets: - assert isinstance(target, LabelInst) \ No newline at end of file + assert isinstance(target, LabelInst) diff --git a/tests/integration/test_snapshot_upload.py b/tests/integration/test_snapshot_upload.py deleted file mode 100644 index f858fd83..00000000 --- a/tests/integration/test_snapshot_upload.py +++ /dev/null @@ -1,18 +0,0 @@ -import tempfile -from indico_toolkit.snapshots import Snapshot -from indico_toolkit.indico_wrapper import Datasets - - -# def test_workflow_submit_and_get_rows(indico_client, snapshot_csv_path): -# snap1 = Snapshot(snapshot_csv_path) -# snap2 = Snapshot(snapshot_csv_path) -# snap1.standardize_column_names() -# snap2.standardize_column_names() -# snap1.append(snap2) -# dataset = Datasets(indico_client) -# with tempfile.NamedTemporaryFile(suffix=".csv") as tf: -# snap1.to_csv(tf.name) -# mydataset = dataset.create_dataset([tf.name], "my_dataset") -# model = dataset.train_model(mydataset, "mymodel", snap1.text_col, snap1.label_col, wait=False) -# print(f"Model Group ID is {model.id}") -# assert isinstance(model.id, int) diff --git 
a/tests/integration/test_workflow_rows.py b/tests/integration/test_workflow_rows.py index 20d31331..5124449a 100644 --- a/tests/integration/test_workflow_rows.py +++ b/tests/integration/test_workflow_rows.py @@ -1,15 +1,18 @@ +import pytest + from indico_toolkit.association import LineItems from indico_toolkit.indico_wrapper import Workflow -from tests.conftest import MODEL_NAME -def test_workflow_submit_and_get_rows(indico_client, workflow_id, pdf_filepath): +@pytest.mark.skip(reason="broken on indico-client>=6.1.0") +def test_workflow_submit_and_get_rows(indico_client, workflow_id, pdf_file): """ - Submit a document to workflow, get results and ocr object, then association line items + Submit a document to workflow, get results and ocr object, then association line + items """ wflow = Workflow(indico_client) sub_ids = wflow.submit_documents_to_workflow( - workflow_id=workflow_id, pdf_filepaths=[pdf_filepath] + workflow_id=workflow_id, files=[pdf_file] ) wflow.wait_for_submissions_to_process(sub_ids) sub_result = wflow.get_submission_results_from_ids([sub_ids[0]])[0] diff --git a/tests/metrics/test_compare_ground_truth.py b/tests/metrics/test_compare_ground_truth.py index a21df955..d15a8f48 100644 --- a/tests/metrics/test_compare_ground_truth.py +++ b/tests/metrics/test_compare_ground_truth.py @@ -1,4 +1,5 @@ import pytest + from indico_toolkit.metrics.compare_ground_truth import CompareGroundTruth diff --git a/tests/metrics/test_plotting.py b/tests/metrics/test_plotting.py index 7c1dc14e..5dfdc2c4 100644 --- a/tests/metrics/test_plotting.py +++ b/tests/metrics/test_plotting.py @@ -1,14 +1,16 @@ import pytest -from plotly.graph_objects import Bar, Scatter + from indico_toolkit.metrics import Plotting +go = pytest.importorskip("plotly.graph_objects") + def test_add_barplot_data(): plotting = Plotting() plotting.add_barplot_data(["a", "b"], [1, 2]) plotting.add_barplot_data(["a", "b"], [3, 4]) assert len(plotting._plot_data) == 2 - assert 
isinstance(plotting._plot_data[0], Bar) + assert isinstance(plotting._plot_data[0], go.Bar) def test_add_line_data(): @@ -16,7 +18,7 @@ def test_add_line_data(): plotting.add_line_data([50, 100], [0.4, 0.8]) plotting.add_line_data([50, 100], [0.6, 0.9]) assert len(plotting._plot_data) == 2 - assert isinstance(plotting._plot_data[0], Scatter) + assert isinstance(plotting._plot_data[0], go.Scatter) def test_add_barplot_exception(): diff --git a/tests/pipelines/test_file_processing.py b/tests/pipelines/test_file_processing.py index 0bac6a9d..6b1c0cc3 100644 --- a/tests/pipelines/test_file_processing.py +++ b/tests/pipelines/test_file_processing.py @@ -1,33 +1,34 @@ -import json -import pytest -from pathlib import Path import os -from indico_toolkit.pipelines import FileProcessing import tempfile +from pathlib import Path +import pytest -def test_get_file_paths_from_dir(testdir_file_path): - test_dir = os.path.join(testdir_file_path, "data/samples/") +from indico_toolkit.pipelines import FileProcessing + + +def test_get_file_paths_from_dir(tests_folder): + test_dir = os.path.join(tests_folder, "data/samples/") fileproc = FileProcessing() fileproc.get_file_paths_from_dir(test_dir, accepted_types=(".pdf", ".json")) assert len(fileproc.file_paths) == 4 assert len(fileproc.invalid_suffix_paths) == 1 -def test_from_dir_absent_suffix(testdir_file_path): - test_dir = os.path.join(testdir_file_path, "data/samples/") +def test_from_dir_absent_suffix(tests_folder): + test_dir = os.path.join(tests_folder, "data/samples/") fileproc = FileProcessing() with pytest.raises(Exception): fileproc.get_file_paths_from_dir(test_dir, accepted_types=".docx") -def test_get_file_paths_from_dir_recursive(testdir_file_path): - test_dir = os.path.join(testdir_file_path, "data/") +def test_get_file_paths_from_dir_recursive(tests_folder): + test_dir = os.path.join(tests_folder, "data/") fileproc = FileProcessing() fileproc.get_file_paths_from_dir( test_dir, accepted_types=(".json",), 
recursive_search=True ) - assert len(fileproc.file_paths) == 4 + assert len(fileproc.file_paths) == len(list(Path(test_dir).glob("**/*.json"))) for fpath in fileproc.file_paths: assert fpath.endswith(".json") @@ -36,13 +37,13 @@ def test_move_all_filepaths(): fileproc = FileProcessing() with tempfile.TemporaryDirectory() as temp_dir_one: temp_dir_two = tempfile.TemporaryDirectory() - temp = tempfile.NamedTemporaryFile(dir=temp_dir_one, suffix='.pdf') - fileproc.move_all_file_paths(temp_dir_one,temp_dir_two.name,('pdf'),True) + temp = tempfile.NamedTemporaryFile(dir=temp_dir_one, suffix=".pdf") + fileproc.move_all_file_paths(temp_dir_one, temp_dir_two.name, ("pdf",), True) assert os.listdir(temp_dir_two.name) == [Path(temp.name).name] -def test_batch_files(testdir_file_path): - test_dir = os.path.join(testdir_file_path, "data/auto_class/") +def test_batch_files(tests_folder): + test_dir = os.path.join(tests_folder, "data/auto_class/") fileproc = FileProcessing() fileproc.get_file_paths_from_dir( test_dir, accepted_types=(".json", ".pdf", ".csv"), recursive_search=True @@ -53,8 +54,8 @@ def test_batch_files(testdir_file_path): assert len(batches[1]) == 1 -def test_remove_specified_files(testdir_file_path): - test_dir = os.path.join(testdir_file_path, "data/") +def test_remove_specified_files(tests_folder): + test_dir = os.path.join(tests_folder, "data/") fileproc = FileProcessing() fileproc.get_file_paths_from_dir( test_dir, accepted_types=(".json", ".pdf", ".csv"), recursive_search=True @@ -65,8 +66,8 @@ def test_remove_specified_files(testdir_file_path): assert file_to_remove not in fileproc.file_paths -def test_read_json(testdir_file_path): - json_path = os.path.join(testdir_file_path, "data/samples/fin_disc_result.json") +def test_read_json(tests_folder): + json_path = os.path.join(tests_folder, "data/samples/fin_disc_result.json") obj = FileProcessing.read_json(json_path) assert isinstance(obj, dict) assert "submission_id" in obj diff --git a/tests/pytest.ini 
b/tests/pytest.ini deleted file mode 100644 index e54e0f0e..00000000 --- a/tests/pytest.ini +++ /dev/null @@ -1,3 +0,0 @@ -[pytest] -markers = - dependency: mark a test as a dependency. \ No newline at end of file diff --git a/tests/snapshots/conftest.py b/tests/snapshots/conftest.py deleted file mode 100644 index 7667f833..00000000 --- a/tests/snapshots/conftest.py +++ /dev/null @@ -1,21 +0,0 @@ -import pytest - - -@pytest.fixture(scope="session") -def snapshot_classes(): - snapshot_classes = [ - "Grand Total Due", - "Line Cost", - "Line Description", - "Quantity", - "Sub Total", - "Tax", - "Transaction Date", - "Transaction Time", - "Vendor City", - "Vendor Name", - "Vendor State", - "Vendor Street Address", - "Vendor Zip Code", - ] - return snapshot_classes diff --git a/tests/snapshots/test_snapshot.py b/tests/snapshots/test_snapshot.py index 0a041a20..0b80399f 100644 --- a/tests/snapshots/test_snapshot.py +++ b/tests/snapshots/test_snapshot.py @@ -1,25 +1,48 @@ -import pytest import os import tempfile from copy import deepcopy -import pandas as pd + +import pytest + from indico_toolkit import ToolkitInputError from indico_toolkit.snapshots import Snapshot +pd = pytest.importorskip("pandas") + # TODO: tests for exception handling -def test_instantiation_wo_params(snapshot_csv_path): - snap = Snapshot(snapshot_csv_path) +@pytest.fixture +def snapshot_classes(): + snapshot_classes = [ + "Grand Total Due", + "Line Cost", + "Line Description", + "Quantity", + "Sub Total", + "Tax", + "Transaction Date", + "Transaction Time", + "Vendor City", + "Vendor Name", + "Vendor State", + "Vendor Street Address", + "Vendor Zip Code", + ] + return snapshot_classes + + +def test_instantiation_wo_params(snapshot_file): + snap = Snapshot(snapshot_file) assert snap.text_col == "document" assert snap.label_col == "question" assert snap.file_name_col == "file_name_10765" assert isinstance(snap.df[snap.label_col].iloc[0]["targets"], list) -def test_instantiation(snapshot_csv_path): 
+def test_instantiation(snapshot_file): snap = Snapshot( - snapshot_csv_path, + snapshot_file, text_col="document", label_col="question", file_name_col="file_name_10765", @@ -30,16 +53,16 @@ def test_instantiation(snapshot_csv_path): assert isinstance(snap.df[snap.label_col].iloc[0]["targets"], list) -def test_instantiation_bad_label_col(snapshot_csv_path): +def test_instantiation_bad_label_col(snapshot_file): with pytest.raises(ToolkitInputError): Snapshot( - snapshot_csv_path, + snapshot_file, label_col="file_name_10765", ) -def test_remove_extraction_labels(snapshot_csv_path): - snap = Snapshot(snapshot_csv_path) +def test_remove_extraction_labels(snapshot_file): + snap = Snapshot(snapshot_file) assert "Vendor Name" in [ i["label"] for i in snap.df[snap.label_col].iloc[0]["targets"] ] @@ -49,31 +72,31 @@ def test_remove_extraction_labels(snapshot_csv_path): ] -def test_standardize_names(snapshot_csv_path): - snap = Snapshot(snapshot_csv_path) +def test_standardize_names(snapshot_file): + snap = Snapshot(snapshot_file) snap.standardize_column_names() assert "source" and "target" and "file_name" in snap.df.columns -def test__eq__not_equal(snapshot_csv_path): - snap1 = Snapshot(snapshot_csv_path) - snap2 = Snapshot(snapshot_csv_path) +def test__eq__not_equal(snapshot_file): + snap1 = Snapshot(snapshot_file) + snap2 = Snapshot(snapshot_file) snap1.standardize_column_names() with pytest.raises(AssertionError): assert snap1 == snap2 -def test__eq__(snapshot_csv_path): - snap1 = Snapshot(snapshot_csv_path) - snap2 = Snapshot(snapshot_csv_path) +def test__eq__(snapshot_file): + snap1 = Snapshot(snapshot_file) + snap2 = Snapshot(snapshot_file) snap1.standardize_column_names() snap2.standardize_column_names() assert snap1 == snap2 -def test_append(snapshot_csv_path): - snap1 = Snapshot(snapshot_csv_path) - snap2 = Snapshot(snapshot_csv_path) +def test_append(snapshot_file): + snap1 = Snapshot(snapshot_file) + snap2 = Snapshot(snapshot_file) 
snap1.standardize_column_names() snap2.standardize_column_names() snap1.append(snap2) @@ -81,8 +104,8 @@ def test_append(snapshot_csv_path): assert snap1.df.shape[0] == expected_length -def test_to_csv(snapshot_csv_path): - snap = Snapshot(snapshot_csv_path) +def test_to_csv(snapshot_file): + snap = Snapshot(snapshot_file) snap.standardize_column_names() with tempfile.NamedTemporaryFile(suffix=".csv") as tf: snap.to_csv(tf.name) @@ -91,11 +114,11 @@ def test_to_csv(snapshot_csv_path): assert isinstance(df["target"][0], str) -def test_split_and_write_to_csv(snapshot_csv_path): - snap = Snapshot(snapshot_csv_path) +def test_split_and_write_to_csv(snapshot_file): + snap = Snapshot(snapshot_file) with tempfile.TemporaryDirectory() as dirpath: snap.split_and_write_to_csv(dirpath, num_splits=3, output_base_name="my_split") - original = pd.read_csv(snapshot_csv_path) + original = pd.read_csv(snapshot_file) assert original.shape[0] == 10 # / 3 = 3,3,4 df1 = pd.read_csv(os.path.join(dirpath, "my_split_1.csv")) df2 = pd.read_csv(os.path.join(dirpath, "my_split_2.csv")) @@ -109,9 +132,9 @@ def test_split_and_write_to_csv(snapshot_csv_path): assert set(full["document"].tolist()) == set(original["document"].tolist()) -def test_merge_by_file_name(snapshot_csv_path): - snap1 = Snapshot(snapshot_csv_path) - snap2 = Snapshot(snapshot_csv_path) +def test_merge_by_file_name(snapshot_file): + snap1 = Snapshot(snapshot_file) + snap2 = Snapshot(snapshot_file) snap1.standardize_column_names() snap2.standardize_column_names() snap1.merge_by_file_name(snap2) @@ -122,17 +145,17 @@ def test_merge_by_file_name(snapshot_csv_path): assert isinstance(val, dict) -def test_merge_by_file_name_columns_no_match(snapshot_csv_path): - snap1 = Snapshot(snapshot_csv_path) - snap2 = Snapshot(snapshot_csv_path) +def test_merge_by_file_name_columns_no_match(snapshot_file): + snap1 = Snapshot(snapshot_file) + snap2 = Snapshot(snapshot_file) snap1.standardize_column_names() with 
pytest.raises(ToolkitInputError): snap1.merge_by_file_name(snap2) -def test_merge_by_file_name_no_filename_matches(snapshot_csv_path): - snap1 = Snapshot(snapshot_csv_path) - snap2 = Snapshot(snapshot_csv_path) +def test_merge_by_file_name_no_filename_matches(snapshot_file): + snap1 = Snapshot(snapshot_file) + snap2 = Snapshot(snapshot_file) snap1.standardize_column_names() snap2.standardize_column_names() snap2.df[snap2.file_name_col] = "no_match" @@ -141,29 +164,29 @@ def test_merge_by_file_name_no_filename_matches(snapshot_csv_path): assert snap1.df[snap1.label_col].tolist() == original_labels -def test_get_extraction_label_names(snapshot_csv_path, snapshot_classes): - snap = Snapshot(snapshot_csv_path) +def test_get_extraction_label_names(snapshot_file, snapshot_classes): + snap = Snapshot(snapshot_file) label_list = snap.get_extraction_label_names() assert len(snapshot_classes) == len(label_list) for snapshot_class, test_class in zip(snapshot_classes, label_list): assert snapshot_class == test_class -def test_number_of_samples(snapshot_csv_path): - snap = Snapshot(snapshot_csv_path) +def test_number_of_samples(snapshot_file): + snap = Snapshot(snapshot_file) assert snap.number_of_samples == 10 -def test_get_all_labeled_text(snapshot_csv_path): - snap = Snapshot(snapshot_csv_path) +def test_get_all_labeled_text(snapshot_file): + snap = Snapshot(snapshot_file) labeled_text = snap.get_all_labeled_text("Vendor State") assert len(labeled_text) == 10 assert isinstance(labeled_text[0], str) assert labeled_text[0] == "WY" -def test_get_all_labeled_text_per_doc(snapshot_csv_path): - snap = Snapshot(snapshot_csv_path) +def test_get_all_labeled_text_per_doc(snapshot_file): + snap = Snapshot(snapshot_file) labeled_text = snap.get_all_labeled_text("Vendor State", return_per_document=True) assert len(labeled_text) == 10 assert isinstance(labeled_text[0], list) @@ -171,8 +194,8 @@ def test_get_all_labeled_text_per_doc(snapshot_csv_path): assert labeled_text[0][0] == "WY" 
-def test_update_label_col_format(old_snapshot_csv_path): - snap = Snapshot(old_snapshot_csv_path) +def test_update_label_col_format(old_snapshot_file): + snap = Snapshot(old_snapshot_file) old_df = deepcopy(snap.df) snap.update_label_col_format(task_type="annotation") diff --git a/tests/structure/test_create_structure.py b/tests/structure/test_create_structure.py deleted file mode 100644 index a1ace313..00000000 --- a/tests/structure/test_create_structure.py +++ /dev/null @@ -1,2 +0,0 @@ -import pytest - diff --git a/tests/test_create_client.py b/tests/test_create_client.py deleted file mode 100644 index fe19c8c1..00000000 --- a/tests/test_create_client.py +++ /dev/null @@ -1,16 +0,0 @@ -import os -import pytest -from indico_toolkit import create_client, ToolkitAuthError - -HOST_URL = os.environ.get("HOST_URL") -API_TOKEN_PATH = os.environ.get("API_TOKEN_PATH") -API_TOKEN = os.environ.get("API_TOKEN") - - -def test_client_creation(): - create_client(HOST_URL, API_TOKEN_PATH, API_TOKEN) - - -def test_client_fail(): - with pytest.raises(ToolkitAuthError): - create_client(HOST_URL, api_token_string="not_a_real_token") diff --git a/tests/test_retry.py b/tests/test_retry.py index f78c10c7..8ede5c5a 100644 --- a/tests/test_retry.py +++ b/tests/test_retry.py @@ -1,6 +1,6 @@ import pytest -from indico_toolkit.retry import retry, MaxRetriesExceeded +from indico_toolkit.retry import retry def test_no_errors() -> None: @@ -20,7 +20,7 @@ def raises_errors() -> None: calls += 1 raise RuntimeError() - with pytest.raises(MaxRetriesExceeded): + with pytest.raises(RuntimeError): raises_errors() assert calls == 5 @@ -51,7 +51,7 @@ async def raises_errors() -> None: calls += 1 raise RuntimeError() - with pytest.raises(MaxRetriesExceeded): + with pytest.raises(RuntimeError): await raises_errors() assert calls == 5 diff --git a/tests/types/conftest.py b/tests/types/conftest.py index acdaef04..9593b9e4 100644 --- a/tests/types/conftest.py +++ b/tests/types/conftest.py @@ -1,9 +1,13 @@ 
-import pytest import json from copy import deepcopy -from indico_toolkit.types import Predictions, WorkflowResult -from indico_toolkit.types import Extractions, Classification +import pytest + +from indico_toolkit.types import ( + Classification, + Extractions, + WorkflowResult, +) @pytest.fixture(scope="module") @@ -12,29 +16,38 @@ def static_extract_results(): results = json.load(infile) return results + @pytest.fixture(scope="module") def static_class_results(): with open("tests/data/samples/fin_disc_classification.json", "r") as infile: results = json.load(infile) return results + @pytest.fixture(scope="module") -def static_extract_preds(static_extract_results, model_name): - return static_extract_results["results"]["document"]["results"][model_name] +def static_extract_preds(static_extract_results): + return static_extract_results["results"]["document"]["results"][ + "Toolkit Test Financial Model" + ] + @pytest.fixture(scope="module") def static_class_preds(static_class_results): - return static_class_results["results"]["document"]["results"]["Toolkit Test Classification Model"] + return static_class_results["results"]["document"]["results"][ + "Toolkit Test Classification Model" + ] + @pytest.fixture(scope="function") def extractions_obj(static_extract_preds): return Extractions(deepcopy(static_extract_preds)) + @pytest.fixture(scope="function") def classification_obj(static_class_preds): return Classification(deepcopy(static_class_preds)) + @pytest.fixture(scope="module") def wf_result_obj(static_extract_results): return WorkflowResult(static_extract_results) - diff --git a/tests/types/test_classifications.py b/tests/types/test_classifications.py index fdabbb00..98f3e615 100644 --- a/tests/types/test_classifications.py +++ b/tests/types/test_classifications.py @@ -1,9 +1,12 @@ -from copy import deepcopy import tempfile -import pandas as pd +from copy import deepcopy + +import pytest from indico_toolkit.types import Classification, ClassificationMGP +pd = 
pytest.importorskip("pandas") + def test_init(static_class_preds): classification = Classification(static_class_preds) diff --git a/tests/types/test_extractions.py b/tests/types/test_extractions.py index b17015f7..6b173514 100644 --- a/tests/types/test_extractions.py +++ b/tests/types/test_extractions.py @@ -1,9 +1,12 @@ import tempfile -import pandas as pd from copy import deepcopy + import pytest + from indico_toolkit.types import Extractions +pd = pytest.importorskip("pandas") + def test_init(static_extract_preds): extractions = Extractions(static_extract_preds) @@ -120,15 +123,15 @@ def test_remove_all_by_label(test_extraction_preds): def test_exist_multiple_vals_for_label(test_extraction_preds): extract = Extractions(test_extraction_preds) - assert extract.exist_multiple_vals_for_label("Paydown Amount") == True + assert extract.exist_multiple_vals_for_label("Paydown Amount") extract.remove_except_max_confidence(["Paydown Amount"]) - assert extract.exist_multiple_vals_for_label("Paydown Amount") == False + assert not extract.exist_multiple_vals_for_label("Paydown Amount") def test_get_most_common_text_value(test_extraction_preds): extract = Extractions(test_extraction_preds) rez = extract.get_most_common_text_value("Paydown Amount") - assert rez == None + assert rez is None extract._preds.append({"label": "Paydown Amount", "text": "a"}) rez = extract.get_most_common_text_value("Paydown Amount") assert rez == "a" diff --git a/tests/types/test_predictions.py b/tests/types/test_predictions.py index 9a3800fb..c970ed2d 100644 --- a/tests/types/test_predictions.py +++ b/tests/types/test_predictions.py @@ -1,14 +1,12 @@ -from _pytest.python import Class -from tests.types.conftest import extractions_obj import pytest +from indico_toolkit.errors import ToolkitInputError from indico_toolkit.types import ( - Predictions, - Extractions, Classification, ClassificationMGP, + Extractions, + Predictions, ) -from indico_toolkit.errorss import ToolkitInputError def 
test_bad_type(): @@ -31,4 +29,3 @@ def test_get_obj_classification(static_class_preds): def test_get_obj_classification_mgp(): classification_obj = Predictions.get_obj({"class A": 0.6, "class B": 0.4}) assert isinstance(classification_obj, ClassificationMGP) - diff --git a/tests/types/test_worflow_result.py b/tests/types/test_worflow_result.py index 576ef552..fe9e8c8f 100644 --- a/tests/types/test_worflow_result.py +++ b/tests/types/test_worflow_result.py @@ -1,7 +1,7 @@ import pytest +from indico_toolkit import ToolkitInputError from indico_toolkit.types import Extractions, WorkflowResult -from indico_toolkit import ToolkitInputError, ToolkitStatusError from indico_toolkit.types.classification import Classification @@ -19,7 +19,10 @@ def test_bad_model_name(wf_result_obj): def test_no_final_preds(): wf_result = WorkflowResult( - {"submission_id": 12, "results": {"document": {"results": {"model_v1": {"pre_review": []}}}}}, + { + "submission_id": 12, + "results": {"document": {"results": {"model_v1": {"pre_review": []}}}}, + }, "model_v1", ) assert wf_result.final_predictions._preds == [] @@ -27,7 +30,10 @@ def test_no_final_preds(): def test_predictions_no_pre_review(): wf_result = WorkflowResult( - {"submission_id": 12, "results": {"document": {"results": {"model_v1": {"pre_review": []}}}}}, + { + "submission_id": 12, + "results": {"document": {"results": {"model_v1": {"pre_review": []}}}}, + }, "model_v1", ) assert isinstance(wf_result.get_predictions, Extractions) @@ -35,7 +41,10 @@ def test_predictions_no_pre_review(): def test_classification_predictions(): wf_result = WorkflowResult( - {"submission_id": 12, "results": {"document": {"results": {"model_v1": {"pre_review": {}}}}}}, + { + "submission_id": 12, + "results": {"document": {"results": {"model_v1": {"pre_review": {}}}}}, + }, "model_v1", ) - assert isinstance(wf_result.get_predictions, Classification) \ No newline at end of file + assert isinstance(wf_result.get_predictions, Classification)