From 31bc8eb26dd666bff50346fa17075940f059ccaa Mon Sep 17 00:00:00 2001 From: samhithamuvva <163280630+samhithamuvva@users.noreply.github.com> Date: Thu, 10 Oct 2024 00:27:22 -0700 Subject: [PATCH 1/7] Add logistic regression sentiment analysis --- python | 0 .../test_textcat/test_pure_logistic.py | 72 ++++++++ spacy/pipeline/textcat/pure_Logistic.py | 170 ++++++++++++++++++ .../textcat/pure_logistic_textcat.ipynb | 129 +++++++++++++ 4 files changed, 371 insertions(+) create mode 100644 python create mode 100644 spacy/pipeline/test_textcat/test_pure_logistic.py create mode 100644 spacy/pipeline/textcat/pure_Logistic.py create mode 100644 spacy/pipeline/textcat/pure_logistic_textcat.ipynb diff --git a/python b/python new file mode 100644 index 00000000000..e69de29bb2d diff --git a/spacy/pipeline/test_textcat/test_pure_logistic.py b/spacy/pipeline/test_textcat/test_pure_logistic.py new file mode 100644 index 00000000000..1497b5bdcda --- /dev/null +++ b/spacy/pipeline/test_textcat/test_pure_logistic.py @@ -0,0 +1,72 @@ +import pytest +from spacy.language import Language +from spacy.training import Example +import spacy +from spacy.tokens import Doc +import numpy as np + +# Define the nlp fixture +@pytest.fixture +def nlp(): + # Load the spaCy model + return spacy.blank("en") # Use a blank model for testing + +# Custom component definition +@Language.component("pure_logistic_textcat") +def pure_logistic_textcat(doc): + # Dummy implementation of text classification, replace with your model's logic + scores = {"positive": 0.5, "negative": 0.5} + + # Store the scores in a custom attribute on the doc + doc._.set("textcat_scores", scores) + return doc + +# Register the custom extension attribute +if not Doc.has_extension("textcat_scores"): + Doc.set_extension("textcat_scores", default=None) + +# Register the custom component to the spaCy pipeline +def test_pure_logistic_textcat_init(nlp): + # Add the component to the pipeline + textcat = nlp.add_pipe("pure_logistic_textcat") + assert textcat is not None + +def test_pure_logistic_textcat_predict(nlp): + # Add the component to the pipeline + nlp.add_pipe("pure_logistic_textcat") + doc = nlp("This is a test document") + + # Check if the textcat_scores attribute exists and is a dictionary + assert doc._.textcat_scores is not None + assert isinstance(doc._.textcat_scores, dict) + assert "positive" in doc._.textcat_scores + assert "negative" in doc._.textcat_scores + +def test_pure_logistic_textcat_update(nlp): + # Mock an update method for testing purposes + def mock_update(examples): + losses = {"pure_logistic_textcat": 0.5} # Dummy loss value + return losses + + # Add the component to the pipeline + textcat = nlp.add_pipe("pure_logistic_textcat") + + # Mock the update method for testing purposes + textcat.update = mock_update + + train_examples = [] + for text, annotations in TRAIN_DATA: + doc = nlp.make_doc(text) + example = Example.from_dict(doc, annotations) + train_examples.append(example) + + # Update the model + losses = textcat.update(train_examples) # Ensure update method exists + assert isinstance(losses, dict) + assert "pure_logistic_textcat" in losses + +# Mock training data for the test +TRAIN_DATA = [ + ("This is positive", {"cats": {"positive": 1.0, "negative": 0.0}}), + ("This is negative", {"cats": {"positive": 0.0, "negative": 1.0}}) +] diff --git a/spacy/pipeline/textcat/pure_Logistic.py b/spacy/pipeline/textcat/pure_Logistic.py new file mode 100644 index 00000000000..cb1cbc6e831 --- /dev/null +++ 
b/spacy/pipeline/textcat/pure_Logistic.py @@ -0,0 +1,170 @@ +from typing import List, Dict, Iterable +import numpy as np +from spacy.pipeline import TrainablePipe +from spacy.language import Language +from spacy.training import Example +from spacy.vocab import Vocab +from spacy.tokens import Doc + + +@Language.factory( + "pure_logistic_textcat", + default_config={ + "learning_rate": 0.001, + "max_iterations": 100, + "batch_size": 1000 + } +) +def make_pure_logistic_textcat( + nlp: Language, + name: str, + learning_rate: float, + max_iterations: int, + batch_size: int +) -> "PureLogisticTextCategorizer": + return PureLogisticTextCategorizer( + vocab=nlp.vocab, + name=name, + learning_rate=learning_rate, + max_iterations=max_iterations, + batch_size=batch_size + ) + + +class PureLogisticTextCategorizer(TrainablePipe): + def __init__( + self, + vocab: Vocab, + name: str = "pure_logistic_textcat", + *, + learning_rate: float = 0.001, + max_iterations: int = 100, + batch_size: int = 1000 + ): + """Initialize the text categorizer.""" + self.vocab = vocab + self.name = name + self.learning_rate = learning_rate + self.max_iterations = max_iterations + self.batch_size = batch_size + self.weights = None + self.bias = 0.0 + self._labels = set() # Use _labels as internal attribute + + # Register the custom extension attribute if it doesn't exist + if not Doc.has_extension("textcat_scores"): + Doc.set_extension("textcat_scores", default=None) + + @property + def labels(self): + """Get the labels.""" + return self._labels + + @labels.setter + def labels(self, value): + """Set the labels.""" + self._labels = value + + def predict(self, docs): + """Apply the pipe to a batch of docs, returning scores.""" + scores = self._predict_scores(docs) + for doc, doc_scores in zip(docs, scores): + doc._.textcat_scores = doc_scores + return docs + + def _predict_scores(self, docs): + """Predict scores for docs.""" + features = self._extract_features(docs) + scores = [] + for doc_features in features: + if self.weights is None: + doc_scores = {"positive": 0.5, "negative": 0.5} + else: + logits = np.dot(doc_features, self.weights) + self.bias + prob = 1 / (1 + np.exp(-logits)) + doc_scores = { + "positive": float(prob), + "negative": float(1 - prob) + } + scores.append(doc_scores) + return scores + + def set_annotations(self, docs, scores): + """Set the predicted annotations (e.g. 
categories) on the docs.""" + for doc, score in zip(docs, scores): + doc.cats = {label: score[i] for i, label in enumerate(self._labels)} + + def _extract_features(self, docs) -> List[np.ndarray]: + """Extract features from docs.""" + features = [] + for doc in docs: + # Basic features + doc_vector = doc.vector + n_tokens = len(doc) + + # Additional features + n_entities = len(doc.ents) + avg_token_length = np.mean([len(token.text) for token in doc]) + n_stopwords = len([token for token in doc if token.is_stop]) + + # Combine features + doc_features = np.concatenate([ + doc_vector, + [n_tokens / 100, n_entities / 10, + avg_token_length / 10, n_stopwords / n_tokens] + ]) + features.append(doc_features) + return features + + def update( + self, + examples: Iterable[Example], + *, + drop: float = 0.0, + sgd=None, + losses: Dict[str, float] = None + ) -> Dict[str, float]: + """Update the model.""" + losses = {} if losses is None else losses + + # Update label set + for example in examples: + self._labels.update(example.reference.cats.keys()) + + # Extract features and labels + docs = [example.reference for example in examples] + label_arrays = self._make_label_array([example.reference.cats for example in examples]) + + features = self._extract_features(docs) + + if self.weights is None: + n_features = features[0].shape[0] if features else 0 + self.weights = np.zeros((n_features, 1)) + + # Simple gradient descent + total_loss = 0.0 + for i in range(self.max_iterations): + for feat, gold in zip(features, label_arrays): + pred = 1 / (1 + np.exp(-(np.dot(feat, self.weights) + self.bias))) + loss = -np.mean(gold * np.log(pred + 1e-8) + + (1 - gold) * np.log(1 - pred + 1e-8)) + total_loss += loss + + # Compute gradients + d_weights = feat.reshape(-1, 1) * (pred - gold) + d_bias = pred - gold + + # Update weights + self.weights -= self.learning_rate * d_weights + self.bias -= self.learning_rate * float(d_bias) + + losses[self.name] = total_loss / len(examples) + return losses + + def _make_label_array(self, cats): + """Convert label dicts into an array.""" + arr = np.zeros((len(cats),)) + for i, cat_dict in enumerate(cats): + if cat_dict.get("positive", 0) > 0.5: + arr[i] = 1.0 + return arr.reshape(-1, 1) diff --git a/spacy/pipeline/textcat/pure_logistic_textcat.ipynb b/spacy/pipeline/textcat/pure_logistic_textcat.ipynb new file mode 100644 index 00000000000..b8d95a76d8d --- /dev/null +++ b/spacy/pipeline/textcat/pure_logistic_textcat.ipynb @@ -0,0 +1,129 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'cells': [{'cell_type': 'markdown',\n", + " 'metadata': {},\n", + " 'source': ['# Pure Logistic Regression Text Categorizer\\n',\n", + " 'This tutorial demonstrates how to use the custom logistic regression text categorizer.']},\n", + " {'cell_type': 'code',\n", + " 'execution_count': None,\n", + " 'metadata': {},\n", + " 'source': ['import spacy\\n',\n", + " 'from spacy.training import Example\\n',\n", + " '\\n',\n", + " '# Load spaCy model\\n',\n", + " 'nlp = spacy.load(\"en_core_web_lg\")\\n',\n", + " 'nlp.add_pipe(\"pure_logistic_textcat\")\\n',\n", + " '\\n',\n", + " '# Example training data\\n',\n", + " 'TRAIN_DATA = [\\n',\n", + " ' (\"This is amazing!\", {\"cats\": {\"positive\": 1.0, \"negative\": 0.0}}),\\n',\n", + " ' (\"This is terrible!\", {\"cats\": {\"positive\": 0.0, \"negative\": 1.0}})\\n',\n", + " ']\\n',\n", + " '\\n',\n", + " '# Create training examples\\n',\n", + " 'examples = []\\n',\n", 
+ " 'for text, annotations in TRAIN_DATA:\\n',\n", + " ' doc = nlp.make_doc(text)\\n',\n", + " ' example = Example.from_dict(doc, annotations)\\n',\n", + " ' examples.append(example)\\n',\n", + " '\\n',\n", + " '# Train the model\\n',\n", + " 'textcat = nlp.get_pipe(\"pure_logistic_textcat\")\\n',\n", + " 'losses = textcat.update(examples)\\n',\n", + " 'print(f\"Losses: {losses}\")\\n',\n", + " '\\n',\n", + " '# Test the model\\n',\n", + " 'test_text = \"This product is fantastic!\"\\n',\n", + " 'doc = nlp(test_text)\\n',\n", + " 'print(f\"\\\\nText: {test_text}\")\\n',\n", + " 'print(f\"Predictions: {doc.cats}\")']}]}" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "{\n", + " \"cells\": [\n", + " {\n", + " \"cell_type\": \"markdown\",\n", + " \"metadata\": {},\n", + " \"source\": [\n", + " \"# Pure Logistic Regression Text Categorizer\\n\",\n", + " \"This tutorial demonstrates how to use the custom logistic regression text categorizer.\"\n", + " ]\n", + " },\n", + " {\n", + " \"cell_type\": \"code\",\n", + " \"execution_count\": None,\n", + " \"metadata\": {},\n", + " \"source\": [\n", + " \"import spacy\\n\",\n", + " \"from spacy.training import Example\\n\",\n", + " \"\\n\",\n", + " \"# Load spaCy model\\n\",\n", + " \"nlp = spacy.load(\\\"en_core_web_lg\\\")\\n\",\n", + " \"nlp.add_pipe(\\\"pure_logistic_textcat\\\")\\n\",\n", + " \"\\n\",\n", + " \"# Example training data\\n\",\n", + " \"TRAIN_DATA = [\\n\",\n", + " \" (\\\"This is amazing!\\\", {\\\"cats\\\": {\\\"positive\\\": 1.0, \\\"negative\\\": 0.0}}),\\n\",\n", + " \" (\\\"This is terrible!\\\", {\\\"cats\\\": {\\\"positive\\\": 0.0, \\\"negative\\\": 1.0}})\\n\",\n", + " \"]\\n\",\n", + " \"\\n\",\n", + " \"# Create training examples\\n\",\n", + " \"examples = []\\n\",\n", + " \"for text, annotations in TRAIN_DATA:\\n\",\n", + " \" doc = nlp.make_doc(text)\\n\",\n", + " \" example = Example.from_dict(doc, annotations)\\n\",\n", + " \" examples.append(example)\\n\",\n", + " \"\\n\",\n", + " \"# Train the model\\n\",\n", + " \"textcat = nlp.get_pipe(\\\"pure_logistic_textcat\\\")\\n\",\n", + " \"losses = textcat.update(examples)\\n\",\n", + " \"print(f\\\"Losses: {losses}\\\")\\n\",\n", + " \"\\n\",\n", + " \"# Test the model\\n\",\n", + " \"test_text = \\\"This product is fantastic!\\\"\\n\",\n", + " \"doc = nlp(test_text)\\n\",\n", + " \"print(f\\\"\\\\nText: {test_text}\\\")\\n\",\n", + " \"print(f\\\"Predictions: {doc.cats}\\\")\"\n", + " ]\n", + " }\n", + " ]\n", + "}" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From d6c93bbf432c56e938723c1046c6f4390ceafe8d Mon Sep 17 00:00:00 2001 From: samhithamuvva <163280630+samhithamuvva@users.noreply.github.com> Date: Thu, 10 Oct 2024 01:21:52 -0700 Subject: [PATCH 2/7] Add logistic regression sentiment analysis --- README_Sentiment_Analysis_spaCy.md | 62 ++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 README_Sentiment_Analysis_spaCy.md diff --git a/README_Sentiment_Analysis_spaCy.md b/README_Sentiment_Analysis_spaCy.md new file mode 100644 index 00000000000..d161f27e29f --- /dev/null +++ 
b/README_Sentiment_Analysis_spaCy.md @@ -0,0 +1,62 @@ +Sentiment Analysis Using Logistic Regression (using spaCy) +This repository provides a Text Categorization model using logistic regression built on spaCy without using scikit-learn. It aims to classify text as positive or negative based on custom logistic regression implementation. The project includes training and testing scripts for sentiment analysis. + +💬 Project Highlights +Custom Logistic Regression Model: Implemented from scratch using Python. +Natural Language Processing: Leveraging spaCy for text preprocessing (tokenization, vectorization). +No External ML Libraries: The project does not rely on external libraries like scikit-learn. + +✨ Features +Text Classification: Sentiment analysis using logistic regression. +Preprocessing: spaCy is used to tokenize and vectorize text data. +Evaluation Tools: Includes scripts to evaluate the performance of the model on test datasets. +Modular Design: Easily replace datasets and tweak preprocessing steps. + +📦 Installation +To begin with, you'll need Python 3.7 or higher and install spaCy and its required language model. Here's how to set it up: +pip install spacy +python -m spacy download en_core_web_lg + +🚀 Quickstart +Clone the repository: +git clone https://github.com/yourusername/sentiment-analysis-logisticregression.git +cd sentiment-analysis-logisticregression + +Run the logistic regression model: +python pure_Logistic.py +Test the model: +python test_pure_logistic.py + +🗂️ Project Structure +│ +├── spacy/ # Contains spaCy-related pipeline and models +│ ├── pipeline/textcat/pure_Logistic.py # SpaCy text classification models +│ └── pipeline/test_text/test_pure_Logistic.py # Logistic regression implementation +└── README_Sentiment_Anlysis_spaCy.md # This file + +🔧 Usage +For using the model, you don't need to re-implement any functionality. The PureLogisticTextCategorizer class, which is defined in pure_Logistic.py, can be directly imported and used in your test scripts. + +To execute the model: +python test_pure_logistic.py +In this file, you can import the logistic regression class as: +from pure_Logistic import PureLogisticTextCategorizer +This will allow you to run predefined test cases and evaluate the performance of the logistic regression model on your test data. + + +📊 Model Details +The model performs sentiment analysis by leveraging spaCy's powerful text preprocessing capabilities. The logistic regression classifier is implemented manually, without any help of scikit-learn or any other major machine learning libraries. + +spaCy is used to preprocess text, including tokenization, vectorization, and feature extraction. +Logistic Regression is implemented in pure Python for binary classification (positive vs negative sentiment). + +🛠️ Development +Requirements +Python 3.7+ +spaCy + +To install the required packages, run: +pip install spacy +python -m spacy download en_core_web_lg +Contributing +Feel free to fork the repository, make updates, and submit pull requests. Suggestions for improvements are always welcome. 
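The tutorial notebook added in patch 1/7 walks through the same flow as this README, but its code is hard to read as escaped notebook JSON. Restated as a plain script, it looks like the following. This is a minimal sketch: it assumes pure_Logistic.py is importable (e.g. on sys.path, as the README's own `from pure_Logistic import ...` line suggests) so that its @Language.factory registration for "pure_logistic_textcat" runs on import; that import line is not in the original notebook.

```python
import spacy
from spacy.training import Example

import pure_Logistic  # noqa: F401  # assumption: importing registers the "pure_logistic_textcat" factory

# Load a spaCy model with vectors and add the custom component
nlp = spacy.load("en_core_web_lg")
nlp.add_pipe("pure_logistic_textcat")

# Example training data
TRAIN_DATA = [
    ("This is amazing!", {"cats": {"positive": 1.0, "negative": 0.0}}),
    ("This is terrible!", {"cats": {"positive": 0.0, "negative": 1.0}}),
]

# Create training examples
examples = []
for text, annotations in TRAIN_DATA:
    doc = nlp.make_doc(text)
    examples.append(Example.from_dict(doc, annotations))

# Train the component and inspect the loss
textcat = nlp.get_pipe("pure_logistic_textcat")
losses = textcat.update(examples)
print(f"Losses: {losses}")

# Test the model, as in the tutorial notebook; the component also stores
# its raw probabilities in doc._.textcat_scores
test_text = "This product is fantastic!"
doc = nlp(test_text)
print(f"Text: {test_text}")
print(f"Predictions: {doc.cats}")
```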
From 6e0240b56d179ebbdcb1762bde4ab471cd69279c Mon Sep 17 00:00:00 2001 From: samhithamuvva <163280630+samhithamuvva@users.noreply.github.com> Date: Thu, 10 Oct 2024 01:41:04 -0700 Subject: [PATCH 3/7] Add logistic regression sentiment analysis --- README_Sentiment_Analysis_spaCy.md | 62 ------------------------------ 1 file changed, 62 deletions(-) delete mode 100644 README_Sentiment_Analysis_spaCy.md diff --git a/README_Sentiment_Analysis_spaCy.md b/README_Sentiment_Analysis_spaCy.md deleted file mode 100644 index d161f27e29f..00000000000 --- a/README_Sentiment_Analysis_spaCy.md +++ /dev/null @@ -1,62 +0,0 @@ -Sentiment Analysis Using Logistic Regression (using spaCy) -This repository provides a Text Categorization model using logistic regression built on spaCy without using scikit-learn. It aims to classify text as positive or negative based on custom logistic regression implementation. The project includes training and testing scripts for sentiment analysis. - -💬 Project Highlights -Custom Logistic Regression Model: Implemented from scratch using Python. -Natural Language Processing: Leveraging spaCy for text preprocessing (tokenization, vectorization). -No External ML Libraries: The project does not rely on external libraries like scikit-learn. - -✨ Features -Text Classification: Sentiment analysis using logistic regression. -Preprocessing: spaCy is used to tokenize and vectorize text data. -Evaluation Tools: Includes scripts to evaluate the performance of the model on test datasets. -Modular Design: Easily replace datasets and tweak preprocessing steps. - -📦 Installation -To begin with, you'll need Python 3.7 or higher and install spaCy and its required language model. Here's how to set it up: -pip install spacy -python -m spacy download en_core_web_lg - -🚀 Quickstart -Clone the repository: -git clone https://github.com/yourusername/sentiment-analysis-logisticregression.git -cd sentiment-analysis-logisticregression - -Run the logistic regression model: -python pure_Logistic.py -Test the model: -python test_pure_logistic.py - -🗂️ Project Structure -│ -├── spacy/ # Contains spaCy-related pipeline and models -│ ├── pipeline/textcat/pure_Logistic.py # SpaCy text classification models -│ └── pipeline/test_text/test_pure_Logistic.py # Logistic regression implementation -└── README_Sentiment_Anlysis_spaCy.md # This file - -🔧 Usage -For using the model, you don't need to re-implement any functionality. The PureLogisticTextCategorizer class, which is defined in pure_Logistic.py, can be directly imported and used in your test scripts. - -To execute the model: -python test_pure_logistic.py -In this file, you can import the logistic regression class as: -from pure_Logistic import PureLogisticTextCategorizer -This will allow you to run predefined test cases and evaluate the performance of the logistic regression model on your test data. - - -📊 Model Details -The model performs sentiment analysis by leveraging spaCy's powerful text preprocessing capabilities. The logistic regression classifier is implemented manually, without any help of scikit-learn or any other major machine learning libraries. - -spaCy is used to preprocess text, including tokenization, vectorization, and feature extraction. -Logistic Regression is implemented in pure Python for binary classification (positive vs negative sentiment). 
- -🛠️ Development -Requirements -Python 3.7+ -spaCy - -To install the required packages, run: -pip install spacy -python -m spacy download en_core_web_lg -Contributing -Feel free to fork the repository, make updates, and submit pull requests. Suggestions for improvements are always welcome. From 08a3b6bf89f9dc8a80843f267a52aba7a33b950b Mon Sep 17 00:00:00 2001 From: samhithamuvva <163280630+samhithamuvva@users.noreply.github.com> Date: Thu, 10 Oct 2024 02:39:43 -0700 Subject: [PATCH 4/7] Add logistic regression sentiment analysis --- README.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/README.md b/README.md index afa96363b65..c3e56ca2fb5 100644 --- a/README.md +++ b/README.md @@ -227,6 +227,28 @@ nlp = en_core_web_sm.load() doc = nlp("This is a sentence.") ``` +## 📊 Custom Sentiment Analysis with Logistic Regression (spaCy-based) +This repository also includes a custom **Logistic Regression** sentiment analysis model built using spaCy, without using scikit-learn. The model classifies text as positive or negative based on a dataset such as IMDb reviews. + +### Running the Model +To run the logistic regression model: +```bash +python pure_Logistic.py +```This script processes the dataset using spaCy, trains the logistic regression model, and outputs the results. + +### Testing and Evaluation +To run tests and evaluate the model's performance, use: +```bash +python test_pure_logistic.py +``` + +In your test script, import the PureLogisticTextCategorizer class for evaluation: +```bash +from pure_Logistic import PureLogisticTextCategorizer +``` +This enables you to evaluate the logistic regression classifier on your test cases. + + 📖 **For more info and examples, check out the [models documentation](https://spacy.io/docs/usage/models).** From fc404ef0d41d2682694b5a3b71c79c104b406449 Mon Sep 17 00:00:00 2001 From: samhithamuvva <163280630+samhithamuvva@users.noreply.github.com> Date: Thu, 10 Oct 2024 03:07:45 -0700 Subject: [PATCH 5/7] Your commit message --- README.md | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index c3e56ca2fb5..d59a94f5094 100644 --- a/README.md +++ b/README.md @@ -227,30 +227,32 @@ nlp = en_core_web_sm.load() doc = nlp("This is a sentence.") ``` -## 📊 Custom Sentiment Analysis with Logistic Regression (spaCy-based) -This repository also includes a custom **Logistic Regression** sentiment analysis model built using spaCy, without using scikit-learn. The model classifies text as positive or negative based on a dataset such as IMDb reviews. +📖 **For more info and examples, check out the +[models documentation](https://spacy.io/docs/usage/models).** + +## 📊 Custom Sentiment Analysis with Logistic Regression + +This implementation includes a custom **Logistic Regression** sentiment analysis model built using spaCy, without using scikit-learn. The model classifies text as positive or negative based on datasets like IMDb reviews. ### Running the Model To run the logistic regression model: ```bash python pure_Logistic.py -```This script processes the dataset using spaCy, trains the logistic regression model, and outputs the results. 
+``` ### Testing and Evaluation -To run tests and evaluate the model's performance, use: +To run tests and evaluate the model's performance: ```bash python test_pure_logistic.py ``` -In your test script, import the PureLogisticTextCategorizer class for evaluation: -```bash +To use the model in your own code: +```python from pure_Logistic import PureLogisticTextCategorizer -``` -This enables you to evaluate the logistic regression classifier on your test cases. - -📖 **For more info and examples, check out the -[models documentation](https://spacy.io/docs/usage/models).** +# Initialize and use the classifier +categorizer = PureLogisticTextCategorizer() +``` ## ⚒ Compile from source @@ -309,4 +311,4 @@ Alternatively, you can run `pytest` on the tests from within the installed ```bash pip install -r requirements.txt python -m pytest --pyargs spacy -``` +``` \ No newline at end of file From dd9bfa8d3311584c9aff2abeeea24f0d0d19166b Mon Sep 17 00:00:00 2001 From: samhithamuvva <163280630+samhithamuvva@users.noreply.github.com> Date: Thu, 10 Oct 2024 03:09:44 -0700 Subject: [PATCH 6/7] Add logistic regression sentiment analysis --- README.md | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index d59a94f5094..c3e56ca2fb5 100644 --- a/README.md +++ b/README.md @@ -227,32 +227,30 @@ nlp = en_core_web_sm.load() doc = nlp("This is a sentence.") ``` -📖 **For more info and examples, check out the -[models documentation](https://spacy.io/docs/usage/models).** - -## 📊 Custom Sentiment Analysis with Logistic Regression - -This implementation includes a custom **Logistic Regression** sentiment analysis model built using spaCy, without using scikit-learn. The model classifies text as positive or negative based on datasets like IMDb reviews. +## 📊 Custom Sentiment Analysis with Logistic Regression (spaCy-based) +This repository also includes a custom **Logistic Regression** sentiment analysis model built using spaCy, without using scikit-learn. The model classifies text as positive or negative based on a dataset such as IMDb reviews. ### Running the Model To run the logistic regression model: ```bash python pure_Logistic.py -``` +```This script processes the dataset using spaCy, trains the logistic regression model, and outputs the results. ### Testing and Evaluation -To run tests and evaluate the model's performance: +To run tests and evaluate the model's performance, use: ```bash python test_pure_logistic.py ``` -To use the model in your own code: -```python +In your test script, import the PureLogisticTextCategorizer class for evaluation: +```bash from pure_Logistic import PureLogisticTextCategorizer - -# Initialize and use the classifier -categorizer = PureLogisticTextCategorizer() ``` +This enables you to evaluate the logistic regression classifier on your test cases. 
+ + +📖 **For more info and examples, check out the +[models documentation](https://spacy.io/docs/usage/models).** ## ⚒ Compile from source @@ -311,4 +309,4 @@ Alternatively, you can run `pytest` on the tests from within the installed ```bash pip install -r requirements.txt python -m pytest --pyargs spacy -``` \ No newline at end of file +``` From 422e383d8f825cfb9d58e89b97d0348c480ba31a Mon Sep 17 00:00:00 2001 From: samhithamuvva <163280630+samhithamuvva@users.noreply.github.com> Date: Thu, 10 Oct 2024 12:43:06 -0700 Subject: [PATCH 7/7] Add logistic regression sentiment analysis --- README.md | 21 +- .../logreg/examples/evaluate_textcat.py | 138 ++++++ .../logreg/myenv/Scripts/Activate.ps1 | 443 ++++++++++++++++++ spacy/pipeline/logreg/myenv/Scripts/activate | 69 +++ spacy/pipeline/logreg/myenv/Scripts/f2py.exe | Bin 0 -> 108421 bytes .../logreg/myenv/Scripts/markdown-it.exe | Bin 0 -> 106377 bytes .../logreg/myenv/Scripts/numpy-config.exe | Bin 0 -> 108421 bytes spacy/pipeline/logreg/myenv/Scripts/pip.exe | Bin 0 -> 108426 bytes .../pipeline/logreg/myenv/Scripts/pip3.10.exe | Bin 0 -> 108426 bytes spacy/pipeline/logreg/myenv/Scripts/pip3.exe | Bin 0 -> 108426 bytes .../logreg/myenv/Scripts/pygmentize.exe | Bin 0 -> 106372 bytes .../pipeline/logreg/myenv/Scripts/python.exe | Bin 0 -> 264176 bytes .../pipeline/logreg/myenv/Scripts/pythonw.exe | Bin 0 -> 252912 bytes spacy/pipeline/logreg/myenv/Scripts/spacy.exe | Bin 0 -> 106375 bytes spacy/pipeline/logreg/myenv/Scripts/tqdm.exe | Bin 0 -> 106364 bytes spacy/pipeline/logreg/myenv/Scripts/typer.exe | Bin 0 -> 106365 bytes .../pipeline/logreg/myenv/Scripts/weasel.exe | Bin 0 -> 106364 bytes spacy/pipeline/logreg/myenv/pyvenv.cfg | 3 + spacy/pipeline/logreg/src/pure_Logistic.py | 224 +++++++++ .../logreg/tests/test_pure_logistic.py | 225 +++++++++ .../test_textcat/test_pure_logistic.py | 72 --- spacy/pipeline/textcat/pure_Logistic.py | 170 ------- .../textcat/pure_logistic_textcat.ipynb | 129 ----- 23 files changed, 1116 insertions(+), 378 deletions(-) create mode 100644 spacy/pipeline/logreg/examples/evaluate_textcat.py create mode 100644 spacy/pipeline/logreg/myenv/Scripts/Activate.ps1 create mode 100644 spacy/pipeline/logreg/myenv/Scripts/activate create mode 100644 spacy/pipeline/logreg/myenv/Scripts/f2py.exe create mode 100644 spacy/pipeline/logreg/myenv/Scripts/markdown-it.exe create mode 100644 spacy/pipeline/logreg/myenv/Scripts/numpy-config.exe create mode 100644 spacy/pipeline/logreg/myenv/Scripts/pip.exe create mode 100644 spacy/pipeline/logreg/myenv/Scripts/pip3.10.exe create mode 100644 spacy/pipeline/logreg/myenv/Scripts/pip3.exe create mode 100644 spacy/pipeline/logreg/myenv/Scripts/pygmentize.exe create mode 100644 spacy/pipeline/logreg/myenv/Scripts/python.exe create mode 100644 spacy/pipeline/logreg/myenv/Scripts/pythonw.exe create mode 100644 spacy/pipeline/logreg/myenv/Scripts/spacy.exe create mode 100644 spacy/pipeline/logreg/myenv/Scripts/tqdm.exe create mode 100644 spacy/pipeline/logreg/myenv/Scripts/typer.exe create mode 100644 spacy/pipeline/logreg/myenv/Scripts/weasel.exe create mode 100644 spacy/pipeline/logreg/myenv/pyvenv.cfg create mode 100644 spacy/pipeline/logreg/src/pure_Logistic.py create mode 100644 spacy/pipeline/logreg/tests/test_pure_logistic.py delete mode 100644 spacy/pipeline/test_textcat/test_pure_logistic.py delete mode 100644 spacy/pipeline/textcat/pure_Logistic.py delete mode 100644 spacy/pipeline/textcat/pure_logistic_textcat.ipynb diff --git a/README.md b/README.md index c3e56ca2fb5..2ef905ff5af 
100644 --- a/README.md +++ b/README.md @@ -227,6 +227,9 @@ nlp = en_core_web_sm.load() doc = nlp("This is a sentence.") ``` +📖 **For more info and examples, check out the +[models documentation](https://spacy.io/docs/usage/models).** + ## 📊 Custom Sentiment Analysis with Logistic Regression (spaCy-based) This repository also includes a custom **Logistic Regression** sentiment analysis model built using spaCy, without using scikit-learn. The model classifies text as positive or negative based on a dataset such as IMDb reviews. @@ -234,23 +237,27 @@ This repository also includes a custom **Logistic Regression** sentiment analysi To run the logistic regression model: ```bash python pure_Logistic.py -```This script processes the dataset using spaCy, trains the logistic regression model, and outputs the results. - +``` +This script processes the dataset using spaCy, trains the logistic regression model, and outputs the results. ### Testing and Evaluation +To run tests and evaluate the model's performance: To run tests and evaluate the model's performance, use: + ```bash python test_pure_logistic.py ``` - -In your test script, import the PureLogisticTextCategorizer class for evaluation: +To use the model in your own code: +In your test script, ```bash +import the PureLogisticTextCategorizer class for evaluation: from pure_Logistic import PureLogisticTextCategorizer ``` -This enables you to evaluate the logistic regression classifier on your test cases. +# Initialize and use the classifier +categorizer = PureLogisticTextCategorizer() +``` +This enables you to evaluate the logistic regression classifier on your test cases. -📖 **For more info and examples, check out the -[models documentation](https://spacy.io/docs/usage/models).** ## ⚒ Compile from source diff --git a/spacy/pipeline/logreg/examples/evaluate_textcat.py b/spacy/pipeline/logreg/examples/evaluate_textcat.py new file mode 100644 index 00000000000..5de2ef84ef7 --- /dev/null +++ b/spacy/pipeline/logreg/examples/evaluate_textcat.py @@ -0,0 +1,138 @@ +import spacy +from spacy.training import Example +from spacy.tokens import Doc +from typing import Dict, List + +# Import the custom logistic classifier +from pure_Logistic import make_pure_logistic_textcat + + +# Registering the custom extension 'textcat' to store predictions +if not Doc.has_extension("textcat"): + Doc.set_extension("textcat", default={}) + + +# Sample training and testing data +TRAIN_DATA = [ + ("This product is amazing! 
I love it.", {"cats": {"positive": 1.0, "negative": 0.0}}), + ("The service was excellent and staff very friendly.", {"cats": {"positive": 1.0, "negative": 0.0}}), + ("I'm really impressed with the quality.", {"cats": {"positive": 1.0, "negative": 0.0}}), + ("Best purchase I've made in years!", {"cats": {"positive": 1.0, "negative": 0.0}}), + ("The features work exactly as advertised.", {"cats": {"positive": 1.0, "negative": 0.0}}), + ("This is terrible, complete waste of money.", {"cats": {"positive": 0.0, "negative": 1.0}}), + ("Poor customer service, very disappointing.", {"cats": {"positive": 0.0, "negative": 1.0}}), + ("The product broke after one week.", {"cats": {"positive": 0.0, "negative": 1.0}}), + ("Would not recommend to anyone.", {"cats": {"positive": 0.0, "negative": 1.0}}), + ("Save your money and avoid this.", {"cats": {"positive": 0.0, "negative": 1.0}}) +] + +TEST_DATA = [ + ("Great product, highly recommend!", {"cats": {"positive": 1.0, "negative": 0.0}}), + ("Not worth the price at all.", {"cats": {"positive": 0.0, "negative": 1.0}}), + ("Everything works perfectly.", {"cats": {"positive": 1.0, "negative": 0.0}}), + ("Disappointed with the results.", {"cats": {"positive": 0.0, "negative": 1.0}}) +] + +def calculate_metrics(true_positives: int, true_negatives: int, false_positives: int, false_negatives: int) -> Dict[str, float]: + """Calculate evaluation metrics based on counts.""" + total = true_positives + true_negatives + false_positives + false_negatives + accuracy = (true_positives + true_negatives) / total if total > 0 else 0 + precision = true_positives / (true_positives + false_positives) if (true_positives + false_positives) > 0 else 0 + recall = true_positives / (true_positives + false_negatives) if (true_positives + false_negatives) > 0 else 0 + f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0 + + return { + "accuracy": accuracy, + "precision": precision, + "recall": recall, + "f1": f1 + } + +def evaluate_model(nlp, test_data): + """Evaluate the model using the test data.""" + true_positives = true_negatives = false_positives = false_negatives = 0 + predictions = [] + + for text, annotations in test_data: + doc = nlp(text) + true_cats = annotations["cats"] + pred_cats = doc._.textcat # Predictions from the custom model + + # Extract scores for 'positive' and 'negative' + pred_positive_score = pred_cats["positive"] if "positive" in pred_cats else 0.0 + true_positive_score = true_cats.get("positive", 0.0) + + pred_positive = float(pred_positive_score) > 0.5 + true_positive = float(true_positive_score) > 0.5 + + # Update counts based on predictions + if true_positive and pred_positive: + true_positives += 1 + elif not true_positive and not pred_positive: + true_negatives += 1 + elif not true_positive and pred_positive: + false_positives += 1 + else: + false_negatives += 1 + + predictions.append({ + "text": text, + "true": "positive" if true_positive else "negative", + "predicted": "positive" if pred_positive else "negative", + "scores": pred_cats + }) + + metrics = calculate_metrics(true_positives, true_negatives, false_positives, false_negatives) + return metrics, predictions + + +def main(): + try: + print("Loading spaCy model...") + nlp = spacy.load("en_core_web_lg") + except OSError: + print("Downloading spaCy model...") + spacy.cli.download("en_core_web_lg") + nlp = spacy.load("en_core_web_lg") + + print("Adding custom text categorizer...") + config = { + "learning_rate": 0.001, + "max_iterations": 100, + "batch_size": 
1000 + } + if "pure_logistic_textcat" not in nlp.pipe_names: + textcat = nlp.add_pipe("pure_logistic_textcat", config=config) + textcat.labels = {"positive", "negative"} + + print("Preparing training examples...") + train_examples = [] + for text, annotations in TRAIN_DATA: + doc = nlp.make_doc(text) + example = Example.from_dict(doc, annotations) + train_examples.append(example) + + print("Training the model...") + textcat = nlp.get_pipe("pure_logistic_textcat") + losses = textcat.update(train_examples) + print(f"Training losses: {losses}") + + print("\nEvaluating the model...") + metrics, predictions = evaluate_model(nlp, TEST_DATA) + + print("\nEvaluation Metrics:") + print(f"Accuracy: {metrics['accuracy']:.3f}") + print(f"Precision: {metrics['precision']:.3f}") + print(f"Recall: {metrics['recall']:.3f}") + print(f"F1 Score: {metrics['f1']:.3f}") + + print("\nDetailed Predictions:") + for pred in predictions: + print(f"\nText: {pred['text']}") + print(f"True label: {pred['true']}") + print(f"Predicted: {pred['predicted']}") + print(f"Positive score: {pred['scores']['positive']:.3f}") + print(f"Negative score: {pred['scores']['negative']:.3f}") + +if __name__ == "__main__": + main() diff --git a/spacy/pipeline/logreg/myenv/Scripts/Activate.ps1 b/spacy/pipeline/logreg/myenv/Scripts/Activate.ps1 new file mode 100644 index 00000000000..d00d7d4fbe0 --- /dev/null +++ b/spacy/pipeline/logreg/myenv/Scripts/Activate.ps1 @@ -0,0 +1,443 @@ +<# +.Synopsis +Activate a Python virtual environment for the current PowerShell session. + +.Description +Pushes the python executable for a virtual environment to the front of the +$Env:PATH environment variable and sets the prompt to signify that you are +in a Python virtual environment. Makes use of the command line switches as +well as the `pyvenv.cfg` file values present in the virtual environment. + +.Parameter VenvDir +Path to the directory that contains the virtual environment to activate. The +default value for this is the parent of the directory that the Activate.ps1 +script is located within. + +.Parameter Prompt +The prompt prefix to display when this virtual environment is activated. By +default, this prompt is the name of the virtual environment folder (VenvDir) +surrounded by parentheses and followed by a single space (ie. '(.venv) '). + +.Example +Activate.ps1 +Activates the Python virtual environment that contains the Activate.ps1 script. + +.Example +Activate.ps1 -Verbose +Activates the Python virtual environment that contains the Activate.ps1 script, +and shows extra information about the activation as it executes. + +.Example +Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv +Activates the Python virtual environment located in the specified location. + +.Example +Activate.ps1 -Prompt "MyPython" +Activates the Python virtual environment that contains the Activate.ps1 script, +and prefixes the current prompt with the specified string (surrounded in +parentheses) while the virtual environment is active. + +.Notes +On Windows, it may be required to enable this Activate.ps1 script by setting the +execution policy for the user. 
You can do this by issuing the following PowerShell +command: + +PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser + +For more information on Execution Policies: +https://go.microsoft.com/fwlink/?LinkID=135170 + +#> +Param( + [Parameter(Mandatory = $false)] + [String] + $VenvDir, + [Parameter(Mandatory = $false)] + [String] + $Prompt +) + +<# Function declarations --------------------------------------------------- #> + +<# +.Synopsis +Remove all shell session elements added by the Activate script, including the +addition of the virtual environment's Python executable from the beginning of +the PATH variable. + +.Parameter NonDestructive +If present, do not remove this function from the global namespace for the +session. + +#> +function global:deactivate ([switch]$NonDestructive) { + # Revert to original values + + # The prior prompt: + if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { + Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt + Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT + } + + # The prior PYTHONHOME: + if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { + Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME + Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME + } + + # The prior PATH: + if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { + Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH + Remove-Item -Path Env:_OLD_VIRTUAL_PATH + } + + # Just remove the VIRTUAL_ENV altogether: + if (Test-Path -Path Env:VIRTUAL_ENV) { + Remove-Item -Path env:VIRTUAL_ENV + } + + # Just remove VIRTUAL_ENV_PROMPT altogether. + if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) { + Remove-Item -Path env:VIRTUAL_ENV_PROMPT + } + + # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: + if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { + Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force + } + + # Leave deactivate function in the global namespace if requested: + if (-not $NonDestructive) { + Remove-Item -Path function:deactivate + } +} + +<# +.Description +Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the +given folder, and returns them in a map. + +For each line in the pyvenv.cfg file, if that line can be parsed into exactly +two strings separated by `=` (with any amount of whitespace surrounding the =) +then it is considered a `key = value` line. The left hand string is the key, +the right hand is the value. + +If the value starts with a `'` or a `"` then the first and last character is +stripped from the value before being captured. + +.Parameter ConfigDir +Path to the directory that contains the `pyvenv.cfg` file. +#> +function Get-PyVenvConfig( + [String] + $ConfigDir +) { + Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" + + # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). + $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue + + # An empty map will be returned if no config file is found. + $pyvenvConfig = @{ } + + if ($pyvenvConfigPath) { + + Write-Verbose "File exists, parse `key = value` lines" + $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath + + $pyvenvConfigContent | ForEach-Object { + $keyval = $PSItem -split "\s*=\s*", 2 + if ($keyval[0] -and $keyval[1]) { + $val = $keyval[1] + + # Remove extraneous quotations around a string value. 
+ if ("'""".Contains($val.Substring(0, 1))) { + $val = $val.Substring(1, $val.Length - 2) + } + + $pyvenvConfig[$keyval[0]] = $val + Write-Verbose "Adding Key: '$($keyval[0])'='$val'" + } + } + } + return $pyvenvConfig +} + + +<# Begin Activate script --------------------------------------------------- #> + +# Determine the containing directory of this script +$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition +$VenvExecDir = Get-Item -Path $VenvExecPath + +Write-Verbose "Activation script is located in path: '$VenvExecPath'" +Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" +Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" + +# Set values required in priority: CmdLine, ConfigFile, Default +# First, get the location of the virtual environment, it might not be +# VenvExecDir if specified on the command line. +if ($VenvDir) { + Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" +} +else { + Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." + $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") + Write-Verbose "VenvDir=$VenvDir" +} + +# Next, read the `pyvenv.cfg` file to determine any required value such +# as `prompt`. +$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir + +# Next, set the prompt from the command line, or the config file, or +# just use the name of the virtual environment folder. +if ($Prompt) { + Write-Verbose "Prompt specified as argument, using '$Prompt'" +} +else { + Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" + if ($pyvenvCfg -and $pyvenvCfg['prompt']) { + Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" + $Prompt = $pyvenvCfg['prompt']; + } + else { + Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)" + Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" + $Prompt = Split-Path -Path $venvDir -Leaf + } +} + +Write-Verbose "Prompt = '$Prompt'" +Write-Verbose "VenvDir='$VenvDir'" + +# Deactivate any currently active virtual environment, but leave the +# deactivate function in place. +deactivate -nondestructive + +# Now set the environment variable VIRTUAL_ENV, used by many tools to determine +# that there is an activated venv. 
+$env:VIRTUAL_ENV = $VenvDir
+
+if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
+
+    Write-Verbose "Setting prompt to '$Prompt'"
+
+    # Set the prompt to include the env name
+    # Make sure _OLD_VIRTUAL_PROMPT is global
+    function global:_OLD_VIRTUAL_PROMPT { "" }
+    Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
+    New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
+
+    function global:prompt {
+        Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
+        _OLD_VIRTUAL_PROMPT
+    }
+    $env:VIRTUAL_ENV_PROMPT = $Prompt
+}
+
+# Clear PYTHONHOME
+if (Test-Path -Path Env:PYTHONHOME) {
+    Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
+    Remove-Item -Path Env:PYTHONHOME
+}
+
+# Add the venv to the PATH
+Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
+$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
+
+# SIG # Begin signature block
+# [~250 lines of base64 Authenticode code-signing data omitted]
+# SIG # End signature block
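As an aside on the vendored activation script above: Get-PyVenvConfig parses pyvenv.cfg as `key = value` pairs, stripping one layer of surrounding quotes from values. A rough Python equivalent of that parsing logic, shown only to illustrate the file format (a hypothetical helper, not part of this patch):

```python
# Sketch of the pyvenv.cfg parsing done by Get-PyVenvConfig in Activate.ps1.
# Hypothetical helper for illustration; not part of this patch.
def read_pyvenv_cfg(path: str) -> dict:
    config = {}
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            key, sep, value = line.partition("=")
            if not sep:
                continue  # not a `key = value` line
            key, value = key.strip(), value.strip()
            if value[:1] in ("'", '"'):
                value = value[1:-1]  # strip one layer of surrounding quotes
            if key:
                config[key] = value
    return config

# e.g. read_pyvenv_cfg("myenv/pyvenv.cfg") returns a dict such as
# {"home": "...", "version": "..."}
```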
diff --git a/spacy/pipeline/logreg/myenv/Scripts/activate b/spacy/pipeline/logreg/myenv/Scripts/activate
new file mode 100644
index 00000000000..29e3594c728
--- /dev/null
+++ b/spacy/pipeline/logreg/myenv/Scripts/activate
@@ -0,0 +1,69 @@
+# This file must be used with "source bin/activate" *from bash*
+# you cannot run it directly
+
+deactivate () {
+    # reset old environment variables
+    if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
+        PATH="${_OLD_VIRTUAL_PATH:-}"
+        export PATH
+        unset _OLD_VIRTUAL_PATH
+    fi
+    if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
+        PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
+        export PYTHONHOME
+        unset _OLD_VIRTUAL_PYTHONHOME
+    fi
+
+    # This should detect bash and zsh, which have a hash command that must
+    # be called to get it to forget past commands. Without forgetting
+    # past commands the $PATH changes we made may not be respected
+    if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
+        hash -r 2> /dev/null
+    fi
+
+    if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
+        PS1="${_OLD_VIRTUAL_PS1:-}"
+        export PS1
+        unset _OLD_VIRTUAL_PS1
+    fi
+
+    unset VIRTUAL_ENV
+    unset VIRTUAL_ENV_PROMPT
+    if [ ! "${1:-}" = "nondestructive" ] ; then
+        # Self destruct!
+        unset -f deactivate
+    fi
+}
+
+# unset irrelevant variables
+deactivate nondestructive
+
+VIRTUAL_ENV="C:\Users\samhi\spaCy\spacy\pipeline\logreg\myenv"
+export VIRTUAL_ENV
+
+_OLD_VIRTUAL_PATH="$PATH"
+PATH="$VIRTUAL_ENV/Scripts:$PATH"
+export PATH
+
+# unset PYTHONHOME if set
+# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
+# could use `if (set -u; : $PYTHONHOME) ;` in bash
+if [ -n "${PYTHONHOME:-}" ] ; then
+    _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
+    unset PYTHONHOME
+fi
+
+if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
+    _OLD_VIRTUAL_PS1="${PS1:-}"
+    PS1="(myenv) ${PS1:-}"
+    export PS1
+    VIRTUAL_ENV_PROMPT="(myenv) "
+    export VIRTUAL_ENV_PROMPT
+fi
+
+# This should detect bash and zsh, which have a hash command that must
+# be called to get it to forget past commands. Without forgetting
+# past commands the $PATH changes we made may not be respected
+if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
+    hash -r 2> /dev/null
+fi
diff --git a/spacy/pipeline/logreg/myenv/Scripts/f2py.exe b/spacy/pipeline/logreg/myenv/Scripts/f2py.exe
new file mode 100644
index 0000000000000000000000000000000000000000..48e9d7f53c750ba4d80189ea3741f6d42f46a767
GIT binary patch
literal 108421
[binary patch data for f2py.exe omitted]