From 031a447f16d388a8d7766c588b09ac44dcb095be Mon Sep 17 00:00:00 2001 From: Sam Winebrake <85908068+samwinebrake@users.noreply.github.com> Date: Sun, 16 Jun 2024 15:02:31 -0400 Subject: [PATCH 01/68] Reusable notification workflow (#665) * Create user_notification_system.yml * add email function to helpers * add failure notification when checks don't pass * Update score_new_plugins.yml * switch from Failure to False * change failure to false * add failure label after email notification sent * [still testing] notified failure check and removal * addition and check of failure-notified label * change to GITHUB_TOKEN --------- Co-authored-by: Martin Schrimpf --- .../check_if_pr_is_automergeable.yml | 135 ++++++++++++++-- .github/workflows/score_new_plugins.yml | 102 ++++++++---- .../workflows/user_notification_system.yml | 146 ++++++++++++++++++ .../submission/actions_helpers.py | 28 ++++ 4 files changed, 369 insertions(+), 42 deletions(-) create mode 100644 .github/workflows/user_notification_system.yml diff --git a/.github/workflows/check_if_pr_is_automergeable.yml b/.github/workflows/check_if_pr_is_automergeable.yml index 0669eeed9..4cc627632 100644 --- a/.github/workflows/check_if_pr_is_automergeable.yml +++ b/.github/workflows/check_if_pr_is_automergeable.yml @@ -1,39 +1,111 @@ name: Check if PR is automergeable -# Triggered on all PRs either by +# Triggered on all PRs by # - completion of CI checks (status events), OR -# - tagging with "automerge" or "automerge-web" labels +# - tagging with "automerge" or "automerge-web" labels, OR +# - updates to current PRs +# # This workflow checks if the PR that invoked the trigger is automergeable. # A PR is automergeable iff it: -# 1) is labeled "automerge" OR "automerge-web" (originates from web submission) +# 1) is labeled "automerge" OR "automerge-web" (originates from web submission) (checked in actions_helpers.py) # 2) only changes plugins (subdirs of /benchmarks, /data, /models, /metrics) # 3) passes all tests (Travis and Jenkins). # If all 3 conditions are met, the "automerge-approved" label is applied to the PR # (This label triggers the `automerge_plugin-only_prs` workflow to merge the PR.) +# +# If any test fails, the user will be notified by the brain-score email account. +# If the user has already been notified of a test failure and there have been no pushes to the PR, no email will be sent. on: pull_request: - types: [labeled] + types: [labeled, synchronize] status: permissions: write-all jobs: + check_trigger: + name: Check what triggered this workflow. If it was the addition of a 'failure-notified' label, skip the rest of workflow. + runs-on: ubuntu-latest + outputs: + PROCEED: ${{ steps.check_label.outputs.PROCEED }} + steps: + - name: Check trigger condition + id: check_label + run: | + if [[ "${{ github.event_name }}" == "pull_request" && "${{ github.event.action }}" == "labeled" ]]; then + LABEL_NAME="${{ github.event.label.name }}" + echo "Trigger label: $LABEL_NAME" + if [[ "$LABEL_NAME" == "failure-notified" ]]; then + echo "PROCEED=false" >> $GITHUB_OUTPUT + else + echo "PROCEED=true" >> $GITHUB_OUTPUT + fi + else + echo "PROCEED=true" >> $GITHUB_OUTPUT + fi + + remove_failure_notified_label: + name: On new push, remove the 'failure-notified' label. 
+ if: ${{ (github.event_name == 'pull_request') && (github.event.action == 'synchronize') }} + runs-on: ubuntu-latest + steps: + - name: Remove 'failure-notified' label on new push to PR + uses: actions/github-script@v6 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const prNumber = context.payload.pull_request.number; + const { data: labels } = await github.rest.issues.listLabelsOnIssue({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber + }); + if (labels.find(label => label.name === 'failure-notified')) { + await github.rest.issues.removeLabel({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + name: 'failure-notified' + }); + } - check_test_results: - name: Check if all tests have passed and PR meets automerge conditions + check_pr_details: + name: Check all details of the PR (if all tests have passed, PR meets automerge conditions, pr number, label is 'automerge-web') runs-on: ubuntu-latest + needs: check_trigger + if: needs.check_trigger.outputs.PROCEED == 'true' outputs: - ALL_TESTS_PASS: ${{ steps.gettestresults.outputs.TEST_RESULTS }} + ALL_TESTS_PASS: ${{ steps.get_test_results.outputs.TEST_RESULTS }} + PR_NUMBER: ${{ steps.get_pr_number.outputs.PR_NUMBER }} + AUTOMERGE_WEB: ${{ steps.check_automerge_web_label.outputs.AUTOMERGE_WEB }} steps: - name: Check out repository code uses: actions/checkout@v4 with: fetch-depth: 0 + - name: Get PR number from workflow context + id: get_pr_number + run: | + echo "PR_NUMBER=$( python brainscore_vision/submission/actions_helpers.py get_pr_num )" >> $GITHUB_OUTPUT + - name: Check if PR has 'automerge-web' label + id: check_automerge_web_label + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + LABELS_JSON=$(gh pr view ${{ steps.get_pr_number.outputs.PR_NUMBER }} --json labels) + echo "Labels: $LABELS_JSON" + if echo "$LABELS_JSON" | jq -e '.labels[] | select(.name == "automerge-web")' >/dev/null; then + echo "Found automerge-web label." + echo "AUTOMERGE_WEB=true" >> $GITHUB_OUTPUT + else + echo "automerge-web label not found." 
+ echo "AUTOMERGE_WEB=false" >> $GITHUB_OUTPUT + fi - name: Get test results and ensure automergeable - id: gettestresults + id: get_test_results run: | echo "Checking test results for PR head $( python brainscore_vision/submission/actions_helpers.py get_pr_head )" test_results=$( python brainscore_vision/submission/actions_helpers.py ) @@ -45,20 +117,55 @@ jobs: runs-on: ubuntu-latest permissions: issues: write - needs: check_test_results - if: needs.check_test_results.outputs.ALL_TESTS_PASS == 'True' + needs: [check_pr_details, check_trigger] + if: needs.check_pr_details.outputs.ALL_TESTS_PASS == 'True' && needs.check_trigger.outputs.PROCEED == 'true' steps: - name: Check out repository code uses: actions/checkout@v4 with: fetch-depth: 0 - - name: Get PR number from workflow context - run: | - echo "PR_NUMBER=$( python brainscore_vision/submission/actions_helpers.py get_pr_num )" >> $GITHUB_ENV - name: Add automerge-approved label to PR env: GH_TOKEN: ${{ secrets.WORKFLOW_TOKEN }} GH_REPO: ${{ github.repository }} - NUMBER: ${{ env.PR_NUMBER }} + NUMBER: ${{ needs.check_pr_details.outputs.PR_NUMBER }} LABELS: automerge-approved run: gh issue edit "$NUMBER" --add-label "$LABELS" + + check_email_label: + name: Check if user has already been notified of failure + runs-on: ubuntu-latest + needs: [check_pr_details, check_trigger] + outputs: + FAILURE_NOTIFIED: ${{ steps.check_failure_notified_label.outputs.FAILURE_NOTIFIED }} + if: needs.check_pr_details.outputs.ALL_TESTS_PASS == 'False' && needs.check_pr_details.outputs.AUTOMERGE_WEB == 'true' && needs.check_trigger.outputs.PROCEED == 'true' + steps: + - name: Check out repository code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Check if PR has 'failure-notified' label already + id: check_failure_notified_label + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + LABELS_JSON=$(gh pr view ${{ needs.check_pr_details.outputs.PR_NUMBER }} --json labels) + echo "Labels: $LABELS_JSON" + if echo "$LABELS_JSON" | jq -e '.labels[] | select(.name == "failure-notified")' >/dev/null; then + echo "Found failure-notified label." + echo "FAILURE_NOTIFIED=true" >> $GITHUB_OUTPUT + else + echo "failure-notified label not found." 
+ echo "FAILURE_NOTIFIED=false" >> $GITHUB_OUTPUT + fi + + notify_failure: + name: If any test fails and failure hasn't been notified, notify the user through the brain-score email account (only needed for web submissions) + uses: ./.github/workflows/user_notification_system.yml + needs: [check_pr_details, check_email_label, check_trigger] + if: needs.check_pr_details.outputs.ALL_TESTS_PASS == 'False' && needs.check_pr_details.outputs.AUTOMERGE_WEB == 'true' && (needs.check_email_label.outputs.FAILURE_NOTIFIED == 'false') && (needs.check_trigger.outputs.PROCEED == 'true') + with: + pr_number: ${{ needs.check_pr_details.outputs.PR_NUMBER }} + is_automerge_web: true + action_type: 'send_email' + secrets: inherit diff --git a/.github/workflows/score_new_plugins.yml b/.github/workflows/score_new_plugins.yml index f8d55d060..8e4c8aec9 100644 --- a/.github/workflows/score_new_plugins.yml +++ b/.github/workflows/score_new_plugins.yml @@ -64,30 +64,27 @@ jobs: id: scoringneeded run: | echo "RUN_SCORING=$(jq -r '.run_score' <<< ${{ steps.getpluginfo.outputs.PLUGIN_INFO }})" >> $GITHUB_OUTPUT - - - name: Find PR author email for non-web submissions - if: "!contains(github.event.pull_request.labels.*.name, 'automerge-web') && steps.scoringneeded.outputs.RUN_SCORING == 'True'" - uses: evvanErb/get-github-email-by-username-action@v2.0 - id: getemail - with: - github-username: ${{github.event.pull_request.user.login}} - token: ${{ secrets.GITHUB_TOKEN }} # Including token enables most reliable way to get a user's email - - name: Update PLUGIN_INFO for non-web submissions - if: "!contains(github.event.pull_request.labels.*.name, 'automerge-web') && steps.scoringneeded.outputs.RUN_SCORING == 'True'" - id: non_automerge_web + + - name: Check for automerge-web label + id: check_label run: | - echo "The PR author email is ${{ steps.getemail.outputs.email }}" - echo "PLUGIN_INFO=$(<<<${{ steps.getpluginfo.outputs.PLUGIN_INFO }} tr -d "'" | jq -c '. + {email: "${{ steps.getemail.outputs.email }}"}')" >> $GITHUB_ENV - - - name: Update PLUGIN_INFO for automerge-web (find uid, public v. private, and bs email) - if: contains(github.event.pull_request.labels.*.name, 'automerge-web') && steps.scoringneeded.outputs.RUN_SCORING == 'True' - id: automerge_web + LABELS_JSON="${{ toJSON(github.event.pull_request.labels.*.name) }}" + if echo "$LABELS_JSON" | grep -q "automerge-web"; then + echo "has_automerge_web=true" >> $GITHUB_ENV + else + echo "has_automerge_web=false" >> $GITHUB_ENV + fi + + - name: Update PLUGIN_INFO based on label run: | - BS_UID="$(echo '${{ github.event.pull_request.title }}' | sed -E 's/.*\(user:([^)]+)\).*/\1/')" - BS_PUBLIC="$(echo '${{ github.event.pull_request.title }}' | sed -E 's/.*\(public:([^)]+)\).*/\1/')" - USER_EMAIL=$(python -c "from brainscore_core.submission.database import email_from_uid; from brainscore_core.submission.endpoints import UserManager; user_manager=UserManager(db_secret='${{ secrets.BSC_DATABASESECRET }}'); print(email_from_uid($BS_UID))") - echo "::add-mask::$USER_EMAIL" # Mask the USER_EMAIL - echo "PLUGIN_INFO=$(<<<${{ steps.getpluginfo.outputs.PLUGIN_INFO }} tr -d "'" | jq -c ". 
+ {user_id: \"$BS_UID\", public: \"$BS_PUBLIC\", email: \"$USER_EMAIL\"}")" >> $GITHUB_ENV + if [[ "$has_automerge_web" == "true" ]]; then + BS_UID="$(echo '${{ github.event.pull_request.title }}' | sed -E 's/.*\(user:([^)]+)\).*/\1/')" + BS_PUBLIC="$(echo '${{ github.event.pull_request.title }}' | sed -E 's/.*\(public:([^)]+)\).*/\1/')" + PLUGIN_INFO=$(echo ${{ steps.getpluginfo.outputs.PLUGIN_INFO }} | tr -d "'" | jq -c ". + {user_id: \"$BS_UID\", public: \"$BS_PUBLIC\"}") + echo "PLUGIN_INFO=${PLUGIN_INFO}" >> $GITHUB_ENV + else + echo "PLUGIN_INFO=$(echo ${{ steps.getpluginfo.outputs.PLUGIN_INFO }} | tr -d "'")" >> $GITHUB_ENV + fi - name: Write PLUGIN_INFO to a json file run: | @@ -98,12 +95,64 @@ jobs: with: name: plugin-info path: plugin-info.json + + extract_email: + name: Extracts email for both PRs and web submissions + uses: ./.github/workflows/user_notification_system.yml + needs: process_submission + if: ${{ needs.process_submission.outputs.RUN_SCORING == 'True' }} + with: + pr_username: ${{github.event.pull_request.user.login}} + pr_title: ${{ github.event.pull_request.title }} + is_automerge_web: ${{ contains(github.event.pull_request.labels.*.name, 'automerge-web') }} + action_type: 'extract_email' + secrets: inherit + + update_plugin_info: + name: Updates PLUGIN_INFO with various fields (domain, competition, model_type, email) + runs-on: ubuntu-latest + needs: extract_email + steps: + - name: Download PLUGIN_INFO artifact + uses: actions/download-artifact@v2 + with: + name: plugin-info + path: artifact-directory + + - name: Set PLUGIN_INFO as an environment variable + run: | + PLUGIN_INFO=$(cat artifact-directory/plugin-info.json) + echo "PLUGIN_INFO=${PLUGIN_INFO}" >> $GITHUB_ENV + - name: Decrypt and mask user email + run: | + DECRYPTED_EMAIL=$(echo "${{ needs.extract_email.outputs.extracted_email }}" | openssl enc -aes-256-cbc -a -d -salt -pass pass:${{ secrets.EMAIL_ENCRYPTION_KEY }}) + echo "::add-mask::$DECRYPTED_EMAIL" + echo "USER_EMAIL=${DECRYPTED_EMAIL}" >> $GITHUB_ENV + + - name: Update PLUGIN_INFO + run: | + PLUGIN_JSON=$(echo "$PLUGIN_INFO" | jq -c '. + {domain: "vision", competition: "None", model_type: "Brain_Model"}') + echo "PLUGIN_INFO=$PLUGIN_JSON" >> $GITHUB_ENV + + PLUGIN_JSON=$(echo "$PLUGIN_JSON" | jq -c --arg email "$USER_EMAIL" '. + {email: $email}') + echo "PLUGIN_INFO=$PLUGIN_JSON" >> $GITHUB_ENV + echo "Updated PLUGIN_INFO: $PLUGIN_JSON" + + - name: Write PLUGIN_INFO to a json file + run: | + echo "$PLUGIN_INFO" > plugin-info.json + + - name: Upload PLUGIN_INFO as an artifact + uses: actions/upload-artifact@v2 + with: + name: plugin-info + path: plugin-info.json run_scoring: name: Score plugins runs-on: ubuntu-latest - needs: [process_submission] + needs: [process_submission, extract_email, update_plugin_info] if: needs.process_submission.outputs.RUN_SCORING == 'True' env: JENKINS_USER: ${{ secrets.JENKINS_USER }} @@ -121,12 +170,8 @@ jobs: run: | PLUGIN_INFO=$(cat artifact-directory/plugin-info.json) USER_EMAIL=$(echo "$PLUGIN_INFO" | jq -r '.email') - echo "::add-mask::$USER_EMAIL" # add a mask when bringing email back from artifact + echo "::add-mask::$USER_EMAIL" # readd a mask when bringing email back from artifact echo "PLUGIN_INFO=${PLUGIN_INFO}" >> $GITHUB_ENV - - - name: Add domain, public, competition, and model_type to PLUGIN_INFO - run: | - echo "PLUGIN_INFO=$(<<<$PLUGIN_INFO tr -d "'" | jq -c '. 
+ {domain: "vision", competition: "None", model_type: "Brain_Model"}')" >> $GITHUB_ENV - name: Check out repository code uses: actions/checkout@v4 @@ -144,3 +189,4 @@ jobs: - name: Run scoring run: | python -c 'from brainscore_core.submission.endpoints import call_jenkins; call_jenkins('\''${{ env.PLUGIN_INFO }}'\'')' + diff --git a/.github/workflows/user_notification_system.yml b/.github/workflows/user_notification_system.yml new file mode 100644 index 000000000..9701bf37d --- /dev/null +++ b/.github/workflows/user_notification_system.yml @@ -0,0 +1,146 @@ +name: User notification system + + +# Triggered by the 'check_if_pr_is_automereable.yml' and 'score_new_plugins.yml' workflows +# This workflow has two distinct purposes: +# - extracting an email address (either from a web submission, or from a PR) +# - sending a PR failure email + +on: + workflow_call: + inputs: + pr_number: + required: false + type: string + pr_username: + required: false + type: string + pr_title: + required: false + type: string + is_automerge_web: + required: true + type: boolean + action_type: + required: true + type: string + description: 'Determines the action to take, e.g., "extract_email" or "send_email".' + outputs: + extracted_email: + description: 'The extracted email address.' + value: ${{ jobs.extract_email.outputs.email }} + +permissions: write-all + +jobs: + extract_email: + name: Extract user email + runs-on: ubuntu-latest + outputs: + email: ${{ steps.set_email_output.outputs.EMAIL }} + steps: + - name: Check out repository code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Python 3.7 + uses: actions/setup-python@v4 + with: + python-version: 3.7 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: us-east-1 + + - name: Installing package dependencies + run: | + python -m pip install --upgrade pip setuptools + python -m pip install "." 
+ + - name: Find PR author email for non-web submissions + if: ${{ !inputs.is_automerge_web }} + uses: evvanErb/get-github-email-by-username-action@v2.0 + id: getemail + with: + github-username: ${{inputs.pr_username}} + token: ${{ secrets.GITHUB_TOKEN }} # Including token enables most reliable way to get a user's email + + - name: Update email for non-web submissions + if: ${{ !inputs.is_automerge_web }} + id: non_automerge_web + run: | + EMAIL=${{ steps.getemail.outputs.email }} + echo "::add-mask::$EMAIL" # Mask the EMAIL + echo "EMAIL=$EMAIL" >> $GITHUB_ENV + + - name: Check if pr title provided + if: inputs.is_automerge_web + id: get_pr_title + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + if [ -z "${{ inputs.pr_title }}" ]; then + echo "Fetching PR title because it wasn't provided" + PR_TITLE=$(gh pr view ${{ inputs.pr_number }} --repo ${{ github.repository }} --json title -q .title) + echo "PR_TITLE=$PR_TITLE" >> $GITHUB_ENV + else + echo "PR_TITLE=${{ inputs.pr_title }}" >> $GITHUB_ENV + fi + + - name: Update email for automerge-web (find email from uid) + if: inputs.is_automerge_web + id: automerge_web + run: | + BS_UID="$(echo $PR_TITLE | sed -E 's/.*\(user:([^)]+)\).*/\1/')" + EMAIL=$(python -c "from brainscore_core.submission.database import email_from_uid; from brainscore_core.submission.endpoints import UserManager; user_manager=UserManager(db_secret='${{ secrets.BSC_DATABASESECRET }}'); print(email_from_uid($BS_UID))") + echo "::add-mask::$EMAIL" # Mask the EMAIL + echo "EMAIL=$EMAIL" >> $GITHUB_ENV + + - name: Encrypt and set job-level output for email + id: set_email_output + run: | + ENCRYPTED_EMAIL=$(echo -n $EMAIL | openssl enc -aes-256-cbc -a -salt -pass pass:${{ secrets.EMAIL_ENCRYPTION_KEY }}) + echo "EMAIL=$ENCRYPTED_EMAIL" >> $GITHUB_OUTPUT + + - name: Write email to file + if: inputs.action_type == 'send_email' + run: echo "$EMAIL" > email.txt + + - name: Upload email as artifact + if: inputs.action_type == 'send_email' + uses: actions/upload-artifact@v2 + with: + name: email-artifact + path: email.txt + + notify_user: + name: Notify user of failure # only necessary for automerge_web labeled since github auto sends email on failure otherwise + runs-on: ubuntu-latest + needs: extract_email + if: inputs.action_type == 'send_email' + steps: + - name: Check out repository code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download email artifact + uses: actions/download-artifact@v2 + with: + name: email-artifact + + - name: Send email notification + run: | + python brainscore_vision/submission/actions_helpers.py send_failure_email $(cat email.txt) ${{ inputs.pr_number }} ${{ secrets.GMAIL_USERNAME }} ${{ secrets.GMAIL_PASSWORD }} + + - name: Add failure-notified label to PR + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GH_REPO: ${{ github.repository }} + NUMBER: ${{ inputs.pr_number }} + LABELS: failure-notified + run: gh issue edit "$NUMBER" --add-label "$LABELS" diff --git a/brainscore_vision/submission/actions_helpers.py b/brainscore_vision/submission/actions_helpers.py index 0fa92f6c7..6593cabd9 100644 --- a/brainscore_vision/submission/actions_helpers.py +++ b/brainscore_vision/submission/actions_helpers.py @@ -13,7 +13,9 @@ import os import requests import sys +import smtplib from typing import Union +from email.mime.text import MIMEText BASE_URL = "https://api.github.com/repos/brain-score/vision" @@ -85,12 +87,34 @@ def are_all_tests_passing(test_results: dict) -> dict: return False else: return True + +def 
any_tests_failing(test_results: dict) -> dict: + if any(result == "failure" for result in test_results.values()): + return True + else: + return False def is_labeled_automerge(pr_num: int) -> bool: label_data = get_data(f"{BASE_URL}/issues/{pr_num}/labels") labeled_automerge = any(label['name'] in ('automerge', 'automerge-web') for label in label_data) return labeled_automerge +def send_failure_email(email: str, pr_number: str, mail_username: str, mail_password: str): + """ Send submitter an email if their web-submitted PR fails. """ + body = "Your Brain-Score submission did not pass checks. " \ + "Please review the test results and update the PR at " \ + f"https://github.com/brain-score/vision/pull/{pr_number} " \ + "or send in an updated submission via the website." + msg = MIMEText(body) + msg['Subject'] = "Brain-Score submission failed" + msg['From'] = "Brain-Score" + msg['To'] = email + + # send email + with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp_server: + smtp_server.login(mail_username, mail_password) + smtp_server.sendmail(mail_username, email, msg.as_string()) + if __name__ == "__main__": @@ -115,6 +139,7 @@ def is_labeled_automerge(pr_num: int) -> bool: 'jenkins_unittests_result': get_statuses_result('Brain-Score Jenkins CI', statuses_json)} tests_pass = are_all_tests_passing(results_dict) + tests_fail = any_tests_failing(results_dict) if tests_pass: if is_labeled_automerge(pr_num): @@ -122,4 +147,7 @@ def is_labeled_automerge(pr_num: int) -> bool: else: print("All tests pass but not labeled for automerge. Exiting.") else: + if tests_fail: + if is_labeled_automerge(pr_num): + print(False) print(results_dict) From a46ee96b1f7c0c5399a68971564782bf9d5d5f3d Mon Sep 17 00:00:00 2001 From: Sam Winebrake <85908068+samwinebrake@users.noreply.github.com> Date: Mon, 17 Jun 2024 10:57:59 -0400 Subject: [PATCH 02/68] fix email workflow bug (#919) * return after printing false * updated if else --- brainscore_vision/submission/actions_helpers.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/brainscore_vision/submission/actions_helpers.py b/brainscore_vision/submission/actions_helpers.py index 6593cabd9..6b59216c3 100644 --- a/brainscore_vision/submission/actions_helpers.py +++ b/brainscore_vision/submission/actions_helpers.py @@ -147,7 +147,7 @@ def send_failure_email(email: str, pr_number: str, mail_username: str, mail_pass else: print("All tests pass but not labeled for automerge. Exiting.") else: - if tests_fail: - if is_labeled_automerge(pr_num): - print(False) - print(results_dict) + if tests_fail and is_labeled_automerge(pr_num): + print(False) + else: + print(results_dict) From 983c8a36460bb53d6a54076ecabfc3db3f5097b7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jun 2024 11:48:54 -0400 Subject: [PATCH 03/68] Bump tqdm in /brainscore_vision/data/sanghavi2020/data_packaging (#810) Bumps [tqdm](https://github.com/tqdm/tqdm) from 4.47.0 to 4.66.3. - [Release notes](https://github.com/tqdm/tqdm/releases) - [Commits](https://github.com/tqdm/tqdm/compare/v4.47.0...v4.66.3) --- updated-dependencies: - dependency-name: tqdm dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> --- .../data/sanghavi2020/data_packaging/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/brainscore_vision/data/sanghavi2020/data_packaging/requirements.txt b/brainscore_vision/data/sanghavi2020/data_packaging/requirements.txt index 577555927..653f0ad6d 100644 --- a/brainscore_vision/data/sanghavi2020/data_packaging/requirements.txt +++ b/brainscore_vision/data/sanghavi2020/data_packaging/requirements.txt @@ -1,4 +1,4 @@ brain-score @ git+https://github.com/brain-score/brain-score@1e4eacb8e1a2c98e55083218ee68a1fc7680d0b0 brainio-base @ git+https://github.com/brain-score/brainio_base@f6553fad46f3451ce51b8916386de1235630de72 brainio-collection @ git+https://github.com/brain-score/brainio_collection@dad8991adb0def87e496553cfce9d0ee33b53c21 -tqdm==4.47.0 +tqdm==4.66.3 From b5eaef449a41a1d1ac05f6dc95761f316472d049 Mon Sep 17 00:00:00 2001 From: Ben Lonnqvist Date: Thu, 20 Jun 2024 16:41:19 +0200 Subject: [PATCH 04/68] Add composite benchmark to Scialom2024, and update AccuracyDistance (#929) * add composite accuracydistance * make accuracydistance more robust and interpretable * add explanation blurb about accuracydistance * rename variable to be more descriptive * update accuracy distance metric to be relative to floor and update expected scores accordingly * remove redundant line * remove redundant line * use precomputed features to compute correct expected scores * add correct test score * add comment to score computation --- .../benchmarks/scialom2024/__init__.py | 2 + .../benchmarks/scialom2024/test.py | 92 ++++++++++--------- .../metrics/accuracy_distance/metric.py | 13 ++- .../metrics/accuracy_distance/test.py | 2 +- 4 files changed, 61 insertions(+), 48 deletions(-) diff --git a/brainscore_vision/benchmarks/scialom2024/__init__.py b/brainscore_vision/benchmarks/scialom2024/__init__.py index d4e2e9925..1ee985125 100644 --- a/brainscore_vision/benchmarks/scialom2024/__init__.py +++ b/brainscore_vision/benchmarks/scialom2024/__init__.py @@ -26,6 +26,8 @@ # composites benchmark_registry['Scialom2024_phosphenes-allBehavioralErrorConsistency'] = lambda: benchmark._Scialom2024BehavioralErrorConsistency('phosphenes-all') benchmark_registry['Scialom2024_segments-allBehavioralErrorConsistency'] = lambda: benchmark._Scialom2024BehavioralErrorConsistency('segments-all') +benchmark_registry['Scialom2024_phosphenes-allBehavioralAccuracyDistance'] = lambda: benchmark._Scialom2024BehavioralAccuracyDistance('phosphenes-all') +benchmark_registry['Scialom2024_segments-allBehavioralAccuracyDistance'] = lambda: benchmark._Scialom2024BehavioralAccuracyDistance('segments-all') # engineering benchmarks benchmark_registry['Scialom2024_rgbEngineeringAccuracy'] = lambda: benchmark._Scialom2024EngineeringAccuracy('rgb') diff --git a/brainscore_vision/benchmarks/scialom2024/test.py b/brainscore_vision/benchmarks/scialom2024/test.py index ae306c0da..c660b795c 100644 --- a/brainscore_vision/benchmarks/scialom2024/test.py +++ b/brainscore_vision/benchmarks/scialom2024/test.py @@ -32,7 +32,9 @@ 'Scialom2024_segments-77BehavioralAccuracyDistance', 'Scialom2024_segments-100BehavioralAccuracyDistance', 'Scialom2024_phosphenes-allBehavioralErrorConsistency', - 'Scialom2024_segments-allBehavioralErrorConsistency' + 'Scialom2024_segments-allBehavioralErrorConsistency', + 
'Scialom2024_phosphenes-allBehavioralAccuracyDistance', + 'Scialom2024_segments-allBehavioralAccuracyDistance' ]) def test_benchmark_registry(benchmark): assert benchmark in benchmark_registry @@ -41,32 +43,34 @@ def test_benchmark_registry(benchmark): class TestBehavioral: @pytest.mark.private_access @pytest.mark.parametrize('dataset, expected_ceiling', [ - ('rgb', approx(0.98513, abs=0.001)), - ('contours', approx(0.97848, abs=0.001)), - ('phosphenes-12', approx(0.95416, abs=0.001)), - ('phosphenes-16', approx(0.92583, abs=0.001)), - ('phosphenes-21', approx(0.92166, abs=0.001)), - ('phosphenes-27', approx(0.86888, abs=0.001)), - ('phosphenes-35', approx(0.87277, abs=0.001)), - ('phosphenes-46', approx(0.87125, abs=0.001)), - ('phosphenes-59', approx(0.87625, abs=0.001)), - ('phosphenes-77', approx(0.89277, abs=0.001)), - ('phosphenes-100', approx(0.89930, abs=0.001)), - ('segments-12', approx(0.89847, abs=0.001)), - ('segments-16', approx(0.89055, abs=0.001)), - ('segments-21', approx(0.88083, abs=0.001)), - ('segments-27', approx(0.87083, abs=0.001)), - ('segments-35', approx(0.86333, abs=0.001)), - ('segments-46', approx(0.90250, abs=0.001)), - ('segments-59', approx(0.87847, abs=0.001)), - ('segments-77', approx(0.89013, abs=0.001)), - ('segments-100', approx(0.93236, abs=0.001)), # all of the above are AccuracyDistance - ('phosphenes-all', approx(0.45755, abs=0.01)), # alls are ErrorConsistency - ('segments-all', approx(0.42529, abs=0.01)), + ('rgb', approx(0.98484, abs=0.001)), + ('contours', approx(0.97794, abs=0.001)), + ('phosphenes-12', approx(0.94213, abs=0.001)), + ('phosphenes-16', approx(0.90114, abs=0.001)), + ('phosphenes-21', approx(0.89134, abs=0.001)), + ('phosphenes-27', approx(0.77732, abs=0.001)), + ('phosphenes-35', approx(0.78466, abs=0.001)), + ('phosphenes-46', approx(0.78246, abs=0.001)), + ('phosphenes-59', approx(0.79745, abs=0.001)), + ('phosphenes-77', approx(0.83475, abs=0.001)), + ('phosphenes-100', approx(0.86073, abs=0.001)), + ('segments-12', approx(0.86167, abs=0.001)), + ('segments-16', approx(0.8202, abs=0.001)), + ('segments-21', approx(0.8079, abs=0.001)), + ('segments-27', approx(0.78282, abs=0.001)), + ('segments-35', approx(0.77724, abs=0.001)), + ('segments-46', approx(0.85391, abs=0.001)), + ('segments-59', approx(0.83410, abs=0.001)), + ('segments-77', approx(0.86227, abs=0.001)), + ('segments-100', approx(0.92517, abs=0.001)), # all of the above are AccuracyDistance + ('phosphenes-allBehavioralErrorConsistency', approx(0.45755, abs=0.01)), + ('segments-allBehavioralErrorConsistency', approx(0.42529, abs=0.01)), + ('phosphenes-allBehavioralAccuracyDistance', approx(0.89533, abs=0.01)), + ('segments-allBehavioralAccuracyDistance', approx(0.89052, abs=0.01)), ]) def test_dataset_ceiling(self, dataset, expected_ceiling): if 'all' in dataset: - benchmark = f"Scialom2024_{dataset}BehavioralErrorConsistency" + benchmark = f"Scialom2024_{dataset}" else: benchmark = f"Scialom2024_{dataset}BehavioralAccuracyDistance" benchmark = load_benchmark(benchmark) @@ -75,26 +79,26 @@ def test_dataset_ceiling(self, dataset, expected_ceiling): @pytest.mark.private_access @pytest.mark.parametrize('dataset, expected_raw_score', [ - ('rgb', approx(0.92666, abs=0.001)), - ('contours', approx(0.26708, abs=0.001)), - ('phosphenes-12', approx(0.87666, abs=0.001)), - ('phosphenes-16', approx(0.83666, abs=0.001)), - ('phosphenes-21', approx(0.83166, abs=0.001)), - ('phosphenes-27', approx(0.73666, abs=0.001)), - ('phosphenes-35', approx(0.72416, abs=0.001)), - 
('phosphenes-46', approx(0.59500, abs=0.001)), - ('phosphenes-59', approx(0.50666, abs=0.001)), - ('phosphenes-77', approx(0.42083, abs=0.001)), - ('phosphenes-100', approx(0.33166, abs=0.001)), - ('segments-12', approx(0.81500, abs=0.001)), - ('segments-16', approx(0.73750, abs=0.001)), - ('segments-21', approx(0.69666, abs=0.001)), - ('segments-27', approx(0.59500, abs=0.001)), - ('segments-35', approx(0.52666, abs=0.001)), - ('segments-46', approx(0.42166, abs=0.001)), - ('segments-59', approx(0.34583, abs=0.001)), - ('segments-77', approx(0.28916, abs=0.001)), - ('segments-100', approx(0.19750, abs=0.001)), # all of the above are AccuracyDistance + ('rgb', approx(0.92616, abs=0.001)), + ('contours', approx(0.25445, abs=0.001)), + ('phosphenes-12', approx(0.84177, abs=0.001)), + ('phosphenes-16', approx(0.77513, abs=0.001)), + ('phosphenes-21', approx(0.76437, abs=0.001)), + ('phosphenes-27', approx(0.56895, abs=0.001)), + ('phosphenes-35', approx(0.52008, abs=0.001)), + ('phosphenes-46', approx(0.29478, abs=0.001)), + ('phosphenes-59', approx(0.19022, abs=0.001)), + ('phosphenes-77', approx(0.13569, abs=0.001)), + ('phosphenes-100', approx(0.11234, abs=0.001)), + ('segments-12', approx(0.72937, abs=0.001)), + ('segments-16', approx(0.57043, abs=0.001)), + ('segments-21', approx(0.49300, abs=0.001)), + ('segments-27', approx(0.30014, abs=0.001)), + ('segments-35', approx(0.22442, abs=0.001)), + ('segments-46', approx(0.14312, abs=0.001)), + ('segments-59', approx(0.12072, abs=0.001)), + ('segments-77', approx(0.12996, abs=0.001)), + ('segments-100', approx(0.11540, abs=0.001)), # all of the above are AccuracyDistance ('phosphenes-all', approx(0.18057, abs=0.01)), # alls are ErrorConsistency ('segments-all', approx(0.15181, abs=0.01)), ]) diff --git a/brainscore_vision/metrics/accuracy_distance/metric.py b/brainscore_vision/metrics/accuracy_distance/metric.py index 9fcd6495d..fb31a7280 100644 --- a/brainscore_vision/metrics/accuracy_distance/metric.py +++ b/brainscore_vision/metrics/accuracy_distance/metric.py @@ -9,6 +9,10 @@ class AccuracyDistance(Metric): + """ + Computes the accuracy distance using the relative distance between the source and target accuracies, adjusted + for the maximum possible difference between the two accuracies. 
+ """ def __call__(self, source: BehavioralAssembly, target: BehavioralAssembly) -> Score: """Target should be the entire BehavioralAssembly, containing truth values.""" @@ -43,9 +47,12 @@ def compare_single_subject(self, source: BehavioralAssembly, target: BehavioralA source_mean = sum(source_correct) / len(source_correct) target_mean = sum(target_correct) / len(target_correct) - source_to_target_distance = np.abs(source_mean - target_mean) - accuracy_distance_score = 1 - source_to_target_distance - return Score(accuracy_distance_score) + maximum_distance = np.max([1 - target_mean, target_mean]) + # get the proportion of the distance between the source and target accuracies, adjusted for the maximum possible + # difference between the two accuracies + relative_distance = 1 - np.abs(source_mean - target_mean) / maximum_distance + + return Score(relative_distance) def ceiling(self, assembly): subjects = self.extract_subjects(assembly) diff --git a/brainscore_vision/metrics/accuracy_distance/test.py b/brainscore_vision/metrics/accuracy_distance/test.py index 73277c25d..2fc15b792 100644 --- a/brainscore_vision/metrics/accuracy_distance/test.py +++ b/brainscore_vision/metrics/accuracy_distance/test.py @@ -9,7 +9,7 @@ def test_score(): assembly = _make_data() metric = load_metric('accuracy_distance') score = metric(assembly.sel(subject='A'), assembly) - assert score == approx(0.77777778) + assert score == approx(0.74074074) def test_has_error(): From 96afb459b6ebe456804e41b5818a2d10736ad59f Mon Sep 17 00:00:00 2001 From: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Date: Fri, 21 Jun 2024 07:46:20 -0400 Subject: [PATCH 05/68] marking test_look_at_neural_V1 memory intense (#925) Co-authored-by: Martin Schrimpf --- brainscore_vision/model_helpers/generic_plugin_tests.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/brainscore_vision/model_helpers/generic_plugin_tests.py b/brainscore_vision/model_helpers/generic_plugin_tests.py index c31b28128..f52e2d2cd 100644 --- a/brainscore_vision/model_helpers/generic_plugin_tests.py +++ b/brainscore_vision/model_helpers/generic_plugin_tests.py @@ -5,6 +5,7 @@ # noinspection PyUnresolvedReferences from brainscore_core.plugin_management.generic_plugin_tests_helper import pytest_generate_tests from brainscore_vision import BrainModel, load_model +import pytest def test_identifier(identifier: str): @@ -40,7 +41,7 @@ def test_look_at_behavior_probabilities(identifier: str): assert (0 <= predictions.values).all() assert (predictions.values <= 1).all() - +@pytest.mark.memory_intense def test_look_at_neural_V1(identifier: str): model = load_model(identifier) if not ProbeModel().can_start_recording_region(model, recording_target=BrainModel.RecordingTarget.V1): From 0245eafe63ef5859fd4df6f7212bee846afe1882 Mon Sep 17 00:00:00 2001 From: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Date: Fri, 21 Jun 2024 07:46:36 -0400 Subject: [PATCH 06/68] Skipping private tests on travis if PR is from a fork (#926) --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 61c968130..d3b99c2d6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -34,6 +34,7 @@ jobs: - name: 3.7 public python: '3.7' - name: 3.7 private + if: fork = false python: '3.7' env: - PRIVATE_ACCESS=1 From 70070dd551d2a340770c8331d8d27f3e4bb5c7d7 Mon Sep 17 00:00:00 2001 From: Katherine Fairchild Date: Sun, 23 Jun 2024 15:21:12 -0400 Subject: [PATCH 07/68] add my_custom_model to models (#934) Co-authored-by: AutoJenkins --- 
.../models/my_custom_model/__init__.py | 5 ++ .../models/my_custom_model/model.py | 57 +++++++++++++++++++ .../models/my_custom_model/setup.py | 26 +++++++++ .../models/my_custom_model/test.py | 1 + 4 files changed, 89 insertions(+) create mode 100755 brainscore_vision/models/my_custom_model/__init__.py create mode 100755 brainscore_vision/models/my_custom_model/model.py create mode 100755 brainscore_vision/models/my_custom_model/setup.py create mode 100755 brainscore_vision/models/my_custom_model/test.py diff --git a/brainscore_vision/models/my_custom_model/__init__.py b/brainscore_vision/models/my_custom_model/__init__.py new file mode 100755 index 000000000..f65aac9d8 --- /dev/null +++ b/brainscore_vision/models/my_custom_model/__init__.py @@ -0,0 +1,5 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['fast_2px_step2_eps2_repeat1_trial1_model_best'] = lambda: ModelCommitment(identifier='fast_2px_step2_eps2_repeat1_trial1_model_best', activations_model=get_model('fast_2px_step2_eps2_repeat1_trial1_model_best'), layers=get_layers('fast_2px_step2_eps2_repeat1_trial1_model_best')) diff --git a/brainscore_vision/models/my_custom_model/model.py b/brainscore_vision/models/my_custom_model/model.py new file mode 100755 index 000000000..c2fd63ad8 --- /dev/null +++ b/brainscore_vision/models/my_custom_model/model.py @@ -0,0 +1,57 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +import torch +import numpy as np +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +import torchvision.models as models +import gdown + +# This is an example implementation for submitting custom model named my_custom_model + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. +# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. 
+ + +def get_model_list(): + return ['fast_2px_step2_eps2_repeat1_trial1_model_best'] + + +def get_model(name): + trained_model = models.__dict__['resnet50']() + trained_model = torch.nn.DataParallel(trained_model) + + url = "https://drive.google.com/uc?id=1kNgOmtSrCQnyINVGw_l9vishwaNeqGN4" + output = "fast_2px_step2_eps2_repeat1_trial1_model_best.pth.tar" + gdown.download(url, output) + + checkpoint = torch.load("fast_2px_step2_eps2_repeat1_trial1_model_best.pth.tar", map_location=torch.device('cpu')) + trained_model.load_state_dict(checkpoint['state_dict']) + + trained_model = trained_model.module + + assert name == 'fast_2px_step2_eps2_repeat1_trial1_model_best' + preprocessing = functools.partial(load_preprocess_images, image_size=224) + # Wrap the model in PytorchWrapper directly + activations_model = PytorchWrapper(identifier='fast_2px_step2_eps2_repeat1_trial1_model_best', model=trained_model, preprocessing=preprocessing) + return activations_model + + +def get_layers(name): + assert name == 'fast_2px_step2_eps2_repeat1_trial1_model_best' + return ['layer1.0.conv3', 'layer1.1.conv3', 'layer1.2.conv3', + 'layer2.0.conv3', 'layer2.1.conv3', 'layer2.2.conv3', 'layer2.3.conv3', + 'layer3.0.conv3', 'layer3.1.conv3', 'layer3.2.conv3', 'layer3.3.conv3', 'layer3.4.conv3', 'layer3.5.conv3', + 'layer4.0.conv3', 'layer4.1.conv3', 'layer4.2.conv3'] + + +def get_bibtex(model_identifier): + return """xx""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) + diff --git a/brainscore_vision/models/my_custom_model/setup.py b/brainscore_vision/models/my_custom_model/setup.py new file mode 100755 index 000000000..2db7c2cf0 --- /dev/null +++ b/brainscore_vision/models/my_custom_model/setup.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup, find_packages + +requirements = [ "torchvision", + "torch", + "gdown" +] + +setup( + packages=find_packages(exclude=['tests']), + include_package_data=True, + install_requires=requirements, + license="MIT license", + zip_safe=False, + keywords='brain-score template', + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Programming Language :: Python :: 3.7', + ], + test_suite='tests', +) diff --git a/brainscore_vision/models/my_custom_model/test.py b/brainscore_vision/models/my_custom_model/test.py new file mode 100755 index 000000000..e594ba9e1 --- /dev/null +++ b/brainscore_vision/models/my_custom_model/test.py @@ -0,0 +1 @@ +# Left empty as part of 2023 models migration From cb175d1b2314db8b4d5dc623994f813a7b1553b6 Mon Sep 17 00:00:00 2001 From: Ben Lonnqvist Date: Mon, 24 Jun 2024 14:15:55 +0200 Subject: [PATCH 08/68] Add callable microsaccades as intended by #492 (#917) * add callable microsaccades * add test to catch errors in stimulus set meta attachment when using microsaccades * mark new test as memory intense * Apply suggestions from code review Co-authored-by: Martin Schrimpf * add require_variance to the signature of look_at for all behavioral arbiters (required since the generic look_at no longer takes kwargs but specifically require_variance) --------- Co-authored-by: Martin Schrimpf --- .../model_helpers/activations/core.py | 8 ++----- .../brain_transformation/__init__.py | 4 ++-- .../brain_transformation/behavior.py | 22 +++++++++++-------- .../activations/test___init__.py | 19 ++++++++++++++++ 4 files changed, 36 insertions(+), 17 deletions(-) diff --git 
a/brainscore_vision/model_helpers/activations/core.py b/brainscore_vision/model_helpers/activations/core.py index f37631dbb..a9f537250 100644 --- a/brainscore_vision/model_helpers/activations/core.py +++ b/brainscore_vision/model_helpers/activations/core.py @@ -73,7 +73,8 @@ def from_stimulus_set(self, stimulus_set, layers, stimuli_identifier=None, requi for hook in self._stimulus_set_hooks.copy().values(): # copy to avoid stale handles stimulus_set = hook(stimulus_set) stimuli_paths = [str(stimulus_set.get_stimulus(stimulus_id)) for stimulus_id in stimulus_set['stimulus_id']] - activations = self.from_paths(stimuli_paths=stimuli_paths, layers=layers, stimuli_identifier=stimuli_identifier) + activations = self.from_paths(stimuli_paths=stimuli_paths, layers=layers, stimuli_identifier=stimuli_identifier, + require_variance=require_variance) activations = attach_stimulus_set_meta(activations, stimulus_set, number_of_trials=self._microsaccade_helper.number_of_trials, @@ -578,15 +579,10 @@ def attach_stimulus_set_meta(assembly, stimulus_set, number_of_trials: int, requ assert (np.array(assembly_paths) == np.array(repeated_stimulus_paths)).all() repeated_stimulus_ids = np.repeat(stimulus_set['stimulus_id'].values, replication_factor) - if replication_factor > 1: - # repeat over the presentation dimension to accommodate multiple runs per stimulus - assembly = xr.concat([assembly for _ in range(replication_factor)], dim='presentation') assembly = assembly.reset_index('presentation') assembly['stimulus_path'] = ('presentation', repeated_stimulus_ids) assembly = assembly.rename({'stimulus_path': 'stimulus_id'}) - assert (np.array(assembly_paths) == np.array(stimulus_paths)).all() - all_columns = [] for column in stimulus_set.columns: repeated_values = np.repeat(stimulus_set[column].values, replication_factor) diff --git a/brainscore_vision/model_helpers/brain_transformation/__init__.py b/brainscore_vision/model_helpers/brain_transformation/__init__.py index c41188615..a1a3c1dc7 100644 --- a/brainscore_vision/model_helpers/brain_transformation/__init__.py +++ b/brainscore_vision/model_helpers/brain_transformation/__init__.py @@ -62,9 +62,9 @@ def start_task(self, task: BrainModel.Task, *args, **kwargs): else: self.do_behavior = False - def look_at(self, stimuli, number_of_trials=1): + def look_at(self, stimuli, number_of_trials: int = 1, require_variance: bool = False): if self.do_behavior: - return self.behavior_model.look_at(stimuli, number_of_trials=number_of_trials) + return self.behavior_model.look_at(stimuli, number_of_trials=number_of_trials, require_variance=require_variance) else: return self.layer_model.look_at(stimuli, number_of_trials=number_of_trials) diff --git a/brainscore_vision/model_helpers/brain_transformation/behavior.py b/brainscore_vision/model_helpers/brain_transformation/behavior.py index 17f02453d..28aeb9e6a 100644 --- a/brainscore_vision/model_helpers/brain_transformation/behavior.py +++ b/brainscore_vision/model_helpers/brain_transformation/behavior.py @@ -43,9 +43,10 @@ def start_task(self, task: BrainModel.Task, choice_labels): self.current_task = task self.choice_labels = choice_labels - def look_at(self, stimuli, number_of_trials=1): + def look_at(self, stimuli, number_of_trials: int = 1, require_variance: bool = False): assert self.current_task == BrainModel.Task.label - logits = self.activations_model(stimuli, layers=['logits']) + logits = self.activations_model(stimuli, layers=['logits'], number_of_trials=number_of_trials, + require_variance=require_variance) choices 
= self.logits_to_choice(logits) return choices @@ -183,20 +184,23 @@ def __init__(self, identifier, activations_model, layer): def identifier(self): return self._identifier - def start_task(self, task: BrainModel.Task, fitting_stimuli): + def start_task(self, task: BrainModel.Task, fitting_stimuli, number_of_trials=1, require_variance=False): assert task in [BrainModel.Task.passive, BrainModel.Task.probabilities] self.current_task = task - fitting_features = self.activations_model(fitting_stimuli, layers=self.readout) + fitting_features = self.activations_model(fitting_stimuli, layers=self.readout, + number_of_trials=number_of_trials, + require_variance=require_variance) fitting_features = fitting_features.transpose('presentation', 'neuroid') assert all(fitting_features['stimulus_id'].values == fitting_stimuli['stimulus_id'].values), \ "stimulus_id ordering is incorrect" - self.classifier.fit(fitting_features, fitting_stimuli['image_label']) + self.classifier.fit(fitting_features, fitting_features['image_label']) - def look_at(self, stimuli, number_of_trials=1): + def look_at(self, stimuli, number_of_trials=1, require_variance=False): if self.current_task is BrainModel.Task.passive: return - features = self.activations_model(stimuli, layers=self.readout) + features = self.activations_model(stimuli, layers=self.readout, number_of_trials=number_of_trials, + require_variance=require_variance) features = features.transpose('presentation', 'neuroid') prediction = self.classifier.predict_proba(features) return prediction @@ -259,13 +263,13 @@ def start_task(self, task: BrainModel.Task): assert task == BrainModel.Task.odd_one_out self.current_task = task - def look_at(self, triplets, number_of_trials=1): + def look_at(self, triplets, number_of_trials: int = 1, require_variance: bool = False): # Compute unique features and image_pathst stimuli = triplets.drop_duplicates(subset=['stimulus_id']) stimuli = stimuli.sort_values(by='stimulus_id') # Get features - features = self.activations_model(stimuli, layers=self.readout) + features = self.activations_model(stimuli, layers=self.readout, require_variance=require_variance) features = features.transpose('presentation', 'neuroid') # Compute similarity matrix diff --git a/tests/test_model_helpers/activations/test___init__.py b/tests/test_model_helpers/activations/test___init__.py index 108666982..99b36cb98 100644 --- a/tests/test_model_helpers/activations/test___init__.py +++ b/tests/test_model_helpers/activations/test___init__.py @@ -292,6 +292,25 @@ def test_from_stimulus_set(model_ctr, layers, pca_components): assert len(activations['neuroid']) == pca_components * len(layers) +@pytest.mark.memory_intense +@pytest.mark.parametrize("number_of_trials", [3, 10]) +@pytest.mark.parametrize(["model_ctr", "layers"], models_layers) +def test_microsaccades_from_stimulus_set(model_ctr, layers, number_of_trials): + image_names = ['rgb.jpg', 'grayscale.png', 'grayscale2.jpg', 'grayscale_alpha.png', 'palletized.png'] + stimulus_set = _build_stimulus_set(image_names) + + activations_extractor = model_ctr() + activations_extractor._extractor.set_visual_degrees(8.) 
+ activations_extractor._extractor._microsaccade_helper.number_of_trials = number_of_trials + activations = activations_extractor.from_stimulus_set(stimulus_set, layers=layers, stimuli_identifier=False, + require_variance=True) + + assert activations is not None + assert len(activations['presentation']) == len(image_names) * number_of_trials + assert set(activations['stimulus_id'].values) == set(image_names) + assert len(np.unique(activations['layer'])) == len(layers) + + @pytest.mark.memory_intense @pytest.mark.parametrize("pca_components", [None, 1000]) def test_exact_activations(pca_components): From 573265cbc16be35dd1972ec2efeabb8bf4eb4f0e Mon Sep 17 00:00:00 2001 From: Michael Ferguson Date: Tue, 25 Jun 2024 11:40:49 -0400 Subject: [PATCH 09/68] adds the Ferguson 2024 data (#894) * adds the Ferguson 2024 data * bibtext from TBD -> blank * Data Packaging.py updates * init.py updates * preliminary fitting_stimuli.py commit * second fitting_stimuli.py commit * add 14 pretraining sets * complete fitting_stimuli.py and test.py * updated broken version ids * make assemblies private testing --- .../data/ferguson2024/__init__.py | 401 ++++++++++++++++++ .../data_packaging/data_packaging.py | 164 +++++++ .../data_packaging/fitting_stimuli.py | 20 + brainscore_vision/data/ferguson2024/test.py | 155 +++++++ 4 files changed, 740 insertions(+) create mode 100644 brainscore_vision/data/ferguson2024/__init__.py create mode 100644 brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py create mode 100644 brainscore_vision/data/ferguson2024/data_packaging/fitting_stimuli.py create mode 100644 brainscore_vision/data/ferguson2024/test.py diff --git a/brainscore_vision/data/ferguson2024/__init__.py b/brainscore_vision/data/ferguson2024/__init__.py new file mode 100644 index 000000000..1838e77d6 --- /dev/null +++ b/brainscore_vision/data/ferguson2024/__init__.py @@ -0,0 +1,401 @@ +from brainio.assemblies import BehavioralAssembly +from brainscore_vision import data_registry, stimulus_set_registry, load_stimulus_set +from brainscore_vision.data_helpers.s3 import load_assembly_from_s3, load_stimulus_set_from_s3 + +BIBTEX = """TBD""" + +# circle_line: +stimulus_set_registry['Ferguson2024_circle_line'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_circle_line', + bucket="brainio-brainscore", + csv_sha1="fc59d23ccfb41b4f98cf02865fc335439d2ad222", + zip_sha1="1f0065910b01a1a0e12611fe61252eafb9c534c3", + csv_version_id="Dcr1JsAE_bYBQwxYqem9JINE3d_bMLGu", + zip_version_id="ss4.fqG7b6NaHkbUXO.iH8f32J07_dmo") + +data_registry['Ferguson2024_circle_line'] = lambda: load_assembly_from_s3( + identifier='Ferguson2024_circle_line', + version_id="2EVlerzlieVA1NbfFiOx2xnhJdVagV4j", + sha1="586da7b1c7cb5a60fe72bc148513e3159a27b134", + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Ferguson2024_circle_line'), +) + + +# color: +stimulus_set_registry['Ferguson2024_color'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_color', + bucket="brainio-brainscore", + csv_sha1="bc351933e1f21eee9704985c1b8231be6955d816", + zip_sha1="6ad04c58de8cc8c28b309572cc41c86470f0c322", + csv_version_id="jK6ddF6hF_oWmE5ccm4MyprQwjfeti3.", + zip_version_id="Eku5pHE1CNJBrcaRstb8PCYCjOliHQmY") + +data_registry['Ferguson2024_color'] = lambda: load_assembly_from_s3( + identifier='Ferguson2024_color', + version_id="VQo0U9ag8r7r9DREexvSlAD_Z326Iumr", + sha1="5b5d67fa3189db9984006910d1954586e6a5a9f3", + bucket="brainio-brainscore", + 
cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Ferguson2024_color'), +) + + +# convergence: +stimulus_set_registry['Ferguson2024_convergence'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_convergence', + bucket="brainio-brainscore", + csv_sha1="bc351933e1f21eee9704985c1b8231be6955d816", + zip_sha1="d65bdf6654e08c9107a20028281ab5e38a0be981", + csv_version_id="PcRg7fdEJI.Ce3wkd0v6sTC3jSw6xiiq", + zip_version_id="4EJaobPVM8STsvMKE.hEcePXcLAjB5VG") + +data_registry['Ferguson2024_convergence'] = lambda: load_assembly_from_s3( + identifier='Ferguson2024_convergence', + version_id="l.nJf3IXYqi5euv5xsqS_ip7Bs0ZpZLX", + sha1="5165c4b0da30826b89c2c242826bb79a4417b9a5", + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Ferguson2024_convergence'), +) + + +# eighth: +stimulus_set_registry['Ferguson2024_eighth'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_eighth', + bucket="brainio-brainscore", + csv_sha1="bc351933e1f21eee9704985c1b8231be6955d816", + zip_sha1="6ee1931b316fd4ccf6eeb16788aa42bb7a36aa41", + csv_version_id="fVbTijqnoE61rcXNCopHMhXdrTavCIjS", + zip_version_id="ifwG3beZ0ePhQGqbo6S7D9Jj1LPCvwsJ") + +data_registry['Ferguson2024_eighth'] = lambda: load_assembly_from_s3( + identifier='Ferguson2024_eighth', + version_id="VklOC2KrpgLJpD1.kGj6Y5D4kLYSwr3s", + sha1="984f9498c42b14cfae6c7272a8707df96fea7ee2", + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Ferguson2024_eighth'), +) + + +# gray_easy: +stimulus_set_registry['Ferguson2024_gray_easy'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_gray_easy', + bucket="brainio-brainscore", + csv_sha1="bc351933e1f21eee9704985c1b8231be6955d816", + zip_sha1="da76bdebf46fe0eb853ea1b877274b1f26f56dfc", + csv_version_id="WCw44X7HWimdn3qLi2D9DSOm5i2bLyrd", + zip_version_id="UJp9O0lHnMMPMFmwY29g5v1cHXvF1XpH") + +data_registry['Ferguson2024_gray_easy'] = lambda: load_assembly_from_s3( + identifier='Ferguson2024_gray_easy', + version_id="gaK.0mU6IVHjkI6MG9eE5Hz5Jt7_gxc6", + sha1="7b09c2f1e8199e680167cfeb124c28dc68c804ab", + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Ferguson2024_gray_easy'), +) + + +# gray_hard: +stimulus_set_registry['Ferguson2024_gray_hard'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_gray_hard', + bucket="brainio-brainscore", + csv_sha1="bc351933e1f21eee9704985c1b8231be6955d816", + zip_sha1="93f02c051f4d89fe059345c0af7ba6fc83b65b35", + csv_version_id="bxWCJhmQw9RYxQx8qzGSltZCnSY4UTRI", + zip_version_id="WLtKonQVU9Og0ZbmVRJKx4Zzxb4INsT8") + +data_registry['Ferguson2024_gray_hard'] = lambda: load_assembly_from_s3( + identifier='Ferguson2024_gray_hard', + version_id="KSpwyfIqK6uovFojNd2_w08lKUJvfOWl", + sha1="2fa35d41e73053ece6d1f0120ca4dc9bc4a9d4ae", + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Ferguson2024_gray_hard'), +) + + +# half: +stimulus_set_registry['Ferguson2024_half'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_half', + bucket="brainio-brainscore", + csv_sha1="bc351933e1f21eee9704985c1b8231be6955d816", + zip_sha1="6461a1d19e031943d42e97e8b557a97d14b18c55", + csv_version_id="WGrCxoue4oPYUKz81t30jcScz1dWs5Dv", + zip_version_id="9pvmNpTauZECPkemXEfLV_wYA9JZT0Iw") + +data_registry['Ferguson2024_half'] = lambda: load_assembly_from_s3( + identifier='Ferguson2024_half', + 
version_id="Z2Mpv3qH9foT9qggDIxWVHoEuKb6mC.a", + sha1="b65e14c5d62fee715438a613e55fffa5e6f76c40", + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Ferguson2024_half'), +) + + +# juncture: +stimulus_set_registry['Ferguson2024_juncture'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_juncture', + bucket="brainio-brainscore", + csv_sha1="bc351933e1f21eee9704985c1b8231be6955d816", + zip_sha1="832102f1eaa713fdfd03c512df2b8feea422c61d", + csv_version_id="J3wrdsSM9LMlGFoC3ks5ees_t1sKjvKc", + zip_version_id="zNu6swQFgclcS8.miCuDBk4AQ4G54KT2") + +data_registry['Ferguson2024_juncture'] = lambda: load_assembly_from_s3( + identifier='Ferguson2024_juncture', + version_id="RstO_IgzeE2UbmHbMw6RN7vV8doFZKBq", + sha1="b18148383ef2158aa795b3cff8a8e237e08b5070", + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Ferguson2024_juncture'), +) + + +# lle: +stimulus_set_registry['Ferguson2024_lle'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_lle', + bucket="brainio-brainscore", + csv_sha1="bc351933e1f21eee9704985c1b8231be6955d816", + zip_sha1="27817e955da9e4747d2aeb8757b7f6492bc7767e", + csv_version_id="y3epQUp6h7zH5h8251G8DlYzwtk6VYxW", + zip_version_id="RCPB0_kLL0GF3xrR0Nl.c11uAL8yYF8c") + +data_registry['Ferguson2024_lle'] = lambda: load_assembly_from_s3( + identifier='Ferguson2024_lle', + version_id="nXWjKJJyGtX.67m.M03oRw7ysfP76e4e", + sha1="08e98305657cd374d9ea103df0fe06783a70344a", + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Ferguson2024_lle'), +) + + +# llh (assuming 'llh' is correct and not a placeholder): +stimulus_set_registry['Ferguson2024_llh'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_llh', + bucket="brainio-brainscore", + csv_sha1="bc351933e1f21eee9704985c1b8231be6955d816", + zip_sha1="56cdf86ecd0b52349b29b2ab0be89daeed9b0eb6", + csv_version_id="n3gooGN6lqWT5c.Qa3.kpUGUwogDtQUT", + zip_version_id="3A2EgFZ9Un_uFl43xqXIudDHUHdF7le1") + +data_registry['Ferguson2024_llh'] = lambda: load_assembly_from_s3( + identifier='Ferguson2024_llh', + version_id="prACZ4sm395A5yfJEYDG77MfGMJhXaXv", + sha1="864d49c00e777f3d464c6c0c59fee087c1de9037", + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Ferguson2024_llh'), +) + + +# quarter: +stimulus_set_registry['Ferguson2024_quarter'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_quarter', + bucket="brainio-brainscore", + csv_sha1="bc351933e1f21eee9704985c1b8231be6955d816", + zip_sha1="c16c5ecf1f38af0d02255a78a6c438074ec8d446", + csv_version_id="lnk7H5WiGe3oB0i5PMTrXA_q058kZSDz", + zip_version_id="frHF3zSr4cCUEs7bVYjjaM3c0WQgwiA9") + +data_registry['Ferguson2024_quarter'] = lambda: load_assembly_from_s3( + identifier='Ferguson2024_quarter', + version_id="_q5R_GoANyjQ8DWQsY.2HBtzW8DoSGpm", + sha1="921b3b51208cdd5f163eca288ea83be47a2b482f", + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Ferguson2024_quarter'), +) + + +# round_f: +stimulus_set_registry['Ferguson2024_round_f'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_round_f', + bucket="brainio-brainscore", + csv_sha1="bc351933e1f21eee9704985c1b8231be6955d816", + zip_sha1="7f456e390cb93805187266d05756beb9cf225e1d", + csv_version_id="jheoU.xYIbSk2hFPhCue2MGmXckqMooe", + zip_version_id="FtLIcpUQzHA_jPdRl_6iSJoqXZKDCeJn") + 
+data_registry['Ferguson2024_round_f'] = lambda: load_assembly_from_s3( + identifier='Ferguson2024_round_f', + version_id="0E7lr44ha3rV7xpnWnE1MpDV79seDxCe", + sha1="acb19ac865b45199a58609db31d3e885ff272fd4", + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Ferguson2024_round_f'), +) + + +# round_v: +stimulus_set_registry['Ferguson2024_round_v'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_round_v', + bucket="brainio-brainscore", + csv_sha1="bc351933e1f21eee9704985c1b8231be6955d816", + zip_sha1="cebb84d2363c1539368e3e1b1bfd83305ad9ae13", + csv_version_id="6_N_s3Cz_g32jncN0bWoDCh.1pWdKCv2", + zip_version_id="r8e50KhAeIc0mKz1qE_xN2z4rMYaNsJ_") + +data_registry['Ferguson2024_round_v'] = lambda: load_assembly_from_s3( + identifier='Ferguson2024_round_v', + version_id="VS.8.ocCdNugRJNU6ha2Wm3K1lK4vK5k", + sha1="ce0361c4386dc7b8866d78023044b3009c84aa4b", + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Ferguson2024_round_v'), +) + + +# tilted_line: +stimulus_set_registry['Ferguson2024_tilted_line'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_tilted_line', + bucket="brainio-brainscore", + csv_sha1="bc351933e1f21eee9704985c1b8231be6955d816", + zip_sha1="bb3d7bcb60ba586c8552266839187a59c2b3138f", + csv_version_id="7mcYPI8IYpS9Rz7pLm6QOxBne29.WcWp", + zip_version_id="5dvzTilCQkDUHG85qCCQZOhB6ZLpfU5_") + +data_registry['Ferguson2024_tilted_line'] = lambda: load_assembly_from_s3( + identifier='Ferguson2024_tilted_line', + version_id="ae4Dbo9JU_PDwTqKGD1G4DQNrdh2cVE2", + sha1="1806034da0c25e8625255eb94dc0a05c7e9cda1f", + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Ferguson2024_tilted_line'), +) + + +''' +Pretraining Stimuli: +''' + +# circle_line +stimulus_set_registry['Ferguson2024_circle_line_training_stimuli'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_circle_line_training_stimuli', + bucket="brainio-brainscore", + csv_sha1="098eb5999e9c4b723abc35ade862d2dc45899230", + zip_sha1="ba9088601d8c79ea5ff3d513e1a76b1232491918", + csv_version_id="fhYDvXCZNhij.2gnNfbTPlD.yOeiuz9G", + zip_version_id="i3lS29oWEn3JMReUaKZerehZKvZqaHq7") + +# color +stimulus_set_registry['Ferguson2024_color_training_stimuli'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_color_training_stimuli', + bucket="brainio-brainscore", + csv_sha1="098eb5999e9c4b723abc35ade862d2dc45899230", + zip_sha1="dcd5044c73e7523afc94f91543adb444a437f911", + csv_version_id="hfvHFxmWOQUwq0LnSwrhk8xecaa9XhQW", + zip_version_id="uwMSKXr5yRYqVDXA9aoS66BBXtsu2kcx") + +# convergence +stimulus_set_registry['Ferguson2024_convergence_training_stimuli'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_convergence_training_stimuli', + bucket="brainio-brainscore", + csv_sha1="098eb5999e9c4b723abc35ade862d2dc45899230", + zip_sha1="00eb401ddfc075a4bb448ec36b8a1c8f3ec1d6e4", + csv_version_id=".ZIqJlEMSgY_U5PeXBU33ifj2KMeMz2e", + zip_version_id="2bCuP2jVWc2WIuE9tD6b7TyPkuBbxyn0") + +# eighth +stimulus_set_registry['Ferguson2024_eighth_training_stimuli'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_eighth_training_stimuli', + bucket="brainio-brainscore", + csv_sha1="098eb5999e9c4b723abc35ade862d2dc45899230", + zip_sha1="26edaec1d0dd14343a117340283e091a2245f3aa", + csv_version_id="X7gv5Rztd.VmOIr8rmEd7XYBWtsGDJdR", + zip_version_id="wVfBxoqcy6YIZnFLLu6rj.8XUXQmOQMg") + +# gray_easy 
+stimulus_set_registry['Ferguson2024_gray_easy_training_stimuli'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_gray_easy_training_stimuli', + bucket="brainio-brainscore", + csv_sha1="098eb5999e9c4b723abc35ade862d2dc45899230", + zip_sha1="18211af83c680d5c916ec15b57b5b871494d6b28", + csv_version_id="j25..m3F2t7j.47YEiOHxTZxiq7ViPxc", + zip_version_id="h6fYQQ.DIWqr09rrqZIupCdUzJXFMTG9") + +# gray hard +stimulus_set_registry['Ferguson2024_gray_hard_training_stimuli'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_gray_hard_training_stimuli', + bucket="brainio-brainscore", + csv_sha1="098eb5999e9c4b723abc35ade862d2dc45899230", + zip_sha1="a54d84dbb548706bbfeb239113a1d92205dc3f67", + csv_version_id="nNGjK3Mgo2h4WVT0yx_yvJeP1htuWSUl", + zip_version_id="MeyqeiOhGSRRLYzG_bLsG.Nj4W2ktRf8") + +# half +stimulus_set_registry['Ferguson2024_half_training_stimuli'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_half_training_stimuli', + bucket="brainio-brainscore", + csv_sha1="098eb5999e9c4b723abc35ade862d2dc45899230", + zip_sha1="0db665619307d5c532a3ccd4311611e5a3830a10", + csv_version_id="bAg_H4VtFostaowCDqy2htVL9iBWCENh", + zip_version_id="rEsJ7ZopuRTyxSnA97ifpiHtkXqnWvR5") + +# juncture +stimulus_set_registry['Ferguson2024_juncture_training_stimuli'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_juncture_training_stimuli', + bucket="brainio-brainscore", + csv_sha1="098eb5999e9c4b723abc35ade862d2dc45899230", + zip_sha1="eb99fd862bec5e61900d037b6b38abf2a278c9f0", + csv_version_id="Uikb_kSDojTsL8LXORmShk_cuW8lFxa.", + zip_version_id="wRFpwf_J2kC2WtBUDGiv1Enhrj5Ah5Gh") + +# lle +stimulus_set_registry['Ferguson2024_lle_training_stimuli'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_lle_training_stimuli', + bucket="brainio-brainscore", + csv_sha1="098eb5999e9c4b723abc35ade862d2dc45899230", + zip_sha1="a1d19d0b77f0eb17ae886a1b7ccc649c5e51d84e", + csv_version_id="QXbtxFHLywcvLQy2enqL2Lxv9.bMgAwo", + zip_version_id="3izgx5jOCHDjH1fy_ncHOL7HxZIYt5nr") + +# llh +stimulus_set_registry['Ferguson2024_llh_training_stimuli'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_llh_training_stimuli', + bucket="brainio-brainscore", + csv_sha1="098eb5999e9c4b723abc35ade862d2dc45899230", + zip_sha1="1550f9f71e6930caa15b96aaf811aa97d48d7267", + csv_version_id="M3WlC_zVg5m8rYLyJd1KlKo2wQkf36G7", + zip_version_id="brEvqix1vzPM6mX8Jnx7pOgJEHETpOXM") + +# quarter +stimulus_set_registry['Ferguson2024_quarter_training_stimuli'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_quarter_training_stimuli', + bucket="brainio-brainscore", + csv_sha1="098eb5999e9c4b723abc35ade862d2dc45899230", + zip_sha1="22669e4a94718b3cbde3f5b2a493044bc091257e", + csv_version_id="lP4fsstG0Jfcnistm2H0AUhmPMHqAfTU", + zip_version_id="zpwv2_fwsmHk1TyR9_DYdmNGuLykgGX_") + +# round_f +stimulus_set_registry['Ferguson2024_round_f_training_stimuli'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_round_f_training_stimuli', + bucket="brainio-brainscore", + csv_sha1="098eb5999e9c4b723abc35ade862d2dc45899230", + zip_sha1="e33855c899f78a115cf377a228e07d87baa554b7", + csv_version_id="csLNw6RL7nen9TFyH552JSahJkKbnNLE", + zip_version_id="7YYhm.tjysTS2e.IhjBx0ovOxWdAVv1M") + +# round_v +stimulus_set_registry['Ferguson2024_round_v_training_stimuli'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_round_v_training_stimuli', + bucket="brainio-brainscore", + csv_sha1="098eb5999e9c4b723abc35ade862d2dc45899230", + 
zip_sha1="b1555f8a140a12e01a87a2f4e452d5863be43a5b", + csv_version_id="QeNeoWjAxMZO4AjmB2SZFC4qEzwf1cBw", + zip_version_id="gj32aM8zE_VXh_N9hNI42g1Uo5AxNDJh") + +# tilted_line +stimulus_set_registry['Ferguson2024_tilted_line_training_stimuli'] = lambda: load_stimulus_set_from_s3( + identifier='Ferguson2024_tilted_line_training_stimuli', + bucket="brainio-brainscore", + csv_sha1="098eb5999e9c4b723abc35ade862d2dc45899230", + zip_sha1="e92533d8aded07ed90ef25650d0cf07c3a458be7", + csv_version_id="l.8gS70OruIDfDU9Oj.DAWw6BQNB.LKc", + zip_version_id="cAv1IPQkKX8Jey1gFc4VCwItECIiSlLV") \ No newline at end of file diff --git a/brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py b/brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py new file mode 100644 index 000000000..9ab988b55 --- /dev/null +++ b/brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py @@ -0,0 +1,164 @@ +from brainio.packaging import package_data_assembly +from pathlib import Path +from shutil import copy +from brainio.stimuli import StimulusSet +from brainio.packaging import package_stimulus_set +from brainio.assemblies import BehavioralAssembly +import pandas as pd + +DATASETS = ['circle_line', 'color', 'convergence', 'eighth', + 'gray_easy', 'gray_hard', 'half', 'juncture', + 'lle', 'llh', 'quarter', 'round_f', + 'round_v', 'tilted_line'] + + +# Packages the stimulus_sets for the Ferguson2024 experiment. There are 14 in all. +def create_stimulus_set_and_upload(name: str, experiment: str, upload_to_s3=True) -> StimulusSet: + """ + + Sample image from dataset: + first_block_0.png + + 1) first_block -> what block the stimuli belong two (which image is target, which is distractor) + 2) 0 -> a number, 0-23 indicating which variation the image is + + There are 24 images in the first block, and 24 in the second block, so the combined stimulus_set is length 48. + The packaged stimuli were structured so that the root folder (tilted_line) had two subfolders, /first_block and /second_block. + + :param name: the name of the experiment, usually Ferguson2024 + :param experiment: the dataset, i.e. color + :param upload_to_s3: True if you want to upload this to BrainIO on S3 + :return: the Stimulus Set + """ + + stimuli = [] + stimulus_paths = {} + stimuli_directory = f'{experiment}' + combine_block_images(stimuli_directory) + for filepath in Path(f"{stimuli_directory}/final").glob('**/*.png'): + stimulus_id = filepath.stem + parts_list = stimulus_id.split("_") + block = parts_list[0] + image_number = parts_list[2] + + stimulus_paths[stimulus_id] = filepath + stimuli.append({ + 'stimulus_id': stimulus_id, + 'image_number': image_number, + 'block': block, + }) + + stimuli = StimulusSet(stimuli) + stimuli.stimulus_paths = stimulus_paths + stimuli.name = f'{name}_{experiment}' # give the StimulusSet an identifier name + + # upload to S3 + if upload_to_s3: + init_data = package_stimulus_set(catalog_name=None, proto_stimulus_set=stimuli, + stimulus_set_identifier=stimuli.name, bucket_name="brainio-brainscore") + print(f"{experiment} stimulus_set\n{init_data}") + return stimuli + + +# Packages the assemblies for the Ferguson2024 experiment. There are 14 in all. +def create_assembly_and_upload(name: str, experiment: str, upload_to_s3=True) -> BehavioralAssembly: + """ + Takes in a sanity-processed csv file, converts to an assembly, and uploads it to BrainIO + + :param name: the name of the experiment, usually Ferguson2024 + :param experiment: the dataset, i.e. 
color
+    :param upload_to_s3: True if you want to upload this to BrainIO on S3
+    :return: the assembly
+    """
+    all_subjects = pd.read_csv(f'csvs/{experiment}_sanity_processed.csv')
+
+    # only look at testing data (no warmup or sanity data):
+    all_subjects = all_subjects[all_subjects["trial_type"] == "normal"]
+    all_subjects = bool_to_int(all_subjects, ['correct', 'target_present'])  # cast bool to int for NetCDF
+
+    # create an ID that is equal to the stimulus_set ID
+    all_subjects['stimulus_id'] = all_subjects['stimulus'].apply(extract_and_concatenate)
+
+    assembly = BehavioralAssembly(all_subjects['correct'],
+                                  coords={
+                                      'stimulus_id': ('presentation', all_subjects['stimulus_id']),
+                                      'stimulus_id_long': ('presentation', all_subjects['stimulus']),
+                                      'participant_id': ('presentation', all_subjects['participant_id']),
+                                      'response_time_ms': ('presentation', all_subjects['response_time_ms']),
+                                      'correct': ('presentation', all_subjects['correct']),
+                                      'target_present': ('presentation', all_subjects['target_present']),
+                                      'distractor_nums': ('presentation', all_subjects['distractor_nums']),
+                                      'block': ('presentation', all_subjects['block']),
+                                      'keypress_response': ('presentation', all_subjects['response']),
+                                      'trial_type': ('presentation', all_subjects['trial_type']),
+                                  },
+                                  dims=['presentation']
+                                  )
+
+    assembly.name = f"{name}_{experiment}"
+
+    # upload assembly to S3
+    if upload_to_s3:
+        init_data = package_data_assembly(None, assembly, assembly_identifier=assembly.name,
+                                          stimulus_set_identifier=f"{name}_{experiment}",
+                                          assembly_class_name="BehavioralAssembly",
+                                          bucket_name="brainio-brainscore")
+        print(f"{experiment} assembly\n{init_data}")
+    return assembly
+
+
+# helper function that takes a folder with the structure outlined in the file docs above and copies all of its
+# images into one folder
+def combine_block_images(stimuli_directory: str) -> None:
+    """
+    Copies the images from the /first_block and /second_block subfolders into a single /final folder,
+    prefixing each filename with the name of its block.
+
+    :param stimuli_directory: the path where your stimuli are located. This folder has two subfolders, /first_block and
+    /second_block
+    """
+    final_directory_path = Path(stimuli_directory) / 'final'
+    final_directory_path.mkdir(exist_ok=True)
+    subfolders = ['first_block', 'second_block']
+    for subfolder in subfolders:
+        current_folder_path = Path(stimuli_directory) / subfolder
+        if not current_folder_path.exists():
+            continue
+        for filepath in current_folder_path.glob('*.png'):
+            stimulus_id = filepath.stem
+            new_file_name = f"{subfolder}_{stimulus_id}.png"
+            new_file_path = final_directory_path / new_file_name
+            copy(filepath, new_file_path)
+
+
+# helper function to get the stimulus_set stimulus_id from the assembly stimulus:
+def extract_and_concatenate(url):
+    parts = url.split('/')
+    block_part = parts[-3]
+    file_name = parts[-1].replace(".png", "")
+    return f"{block_part}_{file_name}"
+
+
+# Converts boolean values to integers in specified columns of a DataFrame.
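+# Values may arrive either as native booleans or as the strings 'True'/'False' (e.g. from the CSV export);
+# both are mapped to 1/0, and any other value is left unchanged via fillna.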
+def bool_to_int(df, columns): + for column in columns: + if column in df.columns: + df[column] = df[column].map({'True': 1, 'False': 0, True: 1, False: 0}).fillna(df[column]) + else: + print(f"Column '{column}' not found in DataFrame.") + return df + + +# wrapper function to loop over all datasets +def package_all_stimulus_sets(name): + for experiment in DATASETS: + create_stimulus_set_and_upload(name, experiment) + + +# wrapper function to loop over all datasets: +def package_all_assemblies(name): + for experiment in DATASETS: + create_assembly_and_upload(name, experiment) + + +if __name__ == '__main__': + package_all_stimulus_sets(name='Ferguson2024') + package_all_assemblies(name='Ferguson2024') diff --git a/brainscore_vision/data/ferguson2024/data_packaging/fitting_stimuli.py b/brainscore_vision/data/ferguson2024/data_packaging/fitting_stimuli.py new file mode 100644 index 000000000..3f1062ff1 --- /dev/null +++ b/brainscore_vision/data/ferguson2024/data_packaging/fitting_stimuli.py @@ -0,0 +1,20 @@ +from data_packaging import create_stimulus_set_and_upload, DATASETS + +# package and upload all 14 training stimuli sets +all_stimulus_sets = [] +paths = {} +for dataset in DATASETS: + + """ + For the fitting stimuli, each stimulus set will have 1920 images in them: + 320 images x 3 distractors (low, medium, high) x 2 types (target on distractor, distractor on target) + """ + + stimulus_set = create_stimulus_set_and_upload("Ferguson2024", f"{dataset}_training_stimuli", upload_to_s3=True) + all_stimulus_sets.append(stimulus_set) + + +# label each dataset with the name of the dataset +for df, name in zip(all_stimulus_sets, DATASETS): + df['experiment'] = name + diff --git a/brainscore_vision/data/ferguson2024/test.py b/brainscore_vision/data/ferguson2024/test.py new file mode 100644 index 000000000..79fbc37a5 --- /dev/null +++ b/brainscore_vision/data/ferguson2024/test.py @@ -0,0 +1,155 @@ +import numpy as np +import pytest +from brainscore_vision import load_dataset, load_stimulus_set +from brainscore_vision.benchmark_helpers import check_standard_format + + +# testing stimulus sets +@pytest.mark.private_access +class TestStimulusSets: + # test stimulus_set data: + @pytest.mark.parametrize('identifier', [ + 'circle_line', 'color', 'convergence', 'eighth', + 'gray_easy', 'gray_hard', 'half', 'juncture', + 'lle', 'llh', 'quarter', 'round_f', + 'round_v', 'tilted_line' + ]) + def test_stimulus_set_exist(self, identifier): + stimulus_set = load_stimulus_set(f"Ferguson2024_{identifier}") + assert stimulus_set is not None + assert stimulus_set.identifier == f"Ferguson2024_{identifier}" + + # test the number of images + @pytest.mark.parametrize('identifier', [ + 'circle_line', 'color', 'convergence', 'eighth', + 'gray_easy', 'gray_hard', 'half', 'juncture', + 'lle', 'llh', 'quarter', 'round_f', + 'round_v', 'tilted_line' + ]) + def test_num_images(self, identifier): + stimulus_set = load_stimulus_set(f"Ferguson2024_{identifier}") + assert len(np.unique(stimulus_set['stimulus_id'].values)) == 48 + + # test the number of blocks: + @pytest.mark.parametrize('identifier', [ + 'circle_line', 'color', 'convergence', 'eighth', + 'gray_easy', 'gray_hard', 'half', 'juncture', + 'lle', 'llh', 'quarter', 'round_f', + 'round_v', 'tilted_line' + ]) + def test_num_blocks(self, identifier): + stimulus_set = load_stimulus_set(f"Ferguson2024_{identifier}") + assert len(np.unique(stimulus_set['block'].values)) == 2 + + +# testing assemblies +@pytest.mark.private_access +class TestAssemblies: + # test stimulus_set 
data:
+    @pytest.mark.parametrize('identifier', [
+        'circle_line', 'color', 'convergence', 'eighth',
+        'gray_easy', 'gray_hard', 'half', 'juncture',
+        'lle', 'llh', 'quarter', 'round_f',
+        'round_v', 'tilted_line'
+    ])
+    def test_exist_and_alignment(self, identifier):
+        assembly = load_dataset(f"Ferguson2024_{identifier}")
+        assert assembly is not None
+        assert assembly.identifier == f"Ferguson2024_{identifier}"
+        assert assembly.stimulus_set.identifier == f"Ferguson2024_{identifier}"
+
+    # test the distinct coordinate values
+    @pytest.mark.parametrize('identifier', [
+        'circle_line', 'color', 'convergence', 'eighth',
+        'gray_easy', 'gray_hard', 'half', 'juncture',
+        'lle', 'llh', 'quarter', 'round_f',
+        'round_v', 'tilted_line'
+    ])
+    def test_distinct_values(self, identifier):
+        assembly = load_dataset(f"Ferguson2024_{identifier}")
+        assert set(assembly['block'].values) == {"first", "second"}
+        assert set(assembly['keypress_response'].values) == {"f", "j"}
+        assert set(assembly['trial_type'].values) == {"normal"}
+        assert set(assembly['distractor_nums'].values) == {"1.0", "5.0", "11.0"}
+        assert set(assembly['target_present'].values) == {True, False}
+        assert set(assembly['correct'].values) == {True, False}
+
+    # test the number of subjects
+    @pytest.mark.parametrize('identifier, num_subjects', [
+        ('circle_line', 30),
+        ('color', 29),
+        ('convergence', 27),
+        ('eighth', 30),
+        ('gray_easy', 28),
+        ('gray_hard', 29),
+        ('half', 29),
+        ('juncture', 27),
+        ('lle', 29),
+        ('llh', 28),
+        ('quarter', 28),
+        ('round_f', 30),
+        ('round_v', 29),
+        ('tilted_line', 30),
+    ])
+    def test_num_subjects(self, identifier, num_subjects):
+        assembly = load_dataset(f"Ferguson2024_{identifier}")
+        assert len(set(assembly['participant_id'].values)) == num_subjects
+
+    # test the number of rows (size)
+    @pytest.mark.parametrize('identifier, size', [
+        ('circle_line', 4292),
+        ('color', 4132),
+        ('convergence', 3874),
+        ('eighth', 4302),
+        ('gray_easy', 4047),
+        ('gray_hard', 4143),
+        ('half', 4162),
+        ('juncture', 3876),
+        ('lle', 4167),
+        ('llh', 4166),
+        ('quarter', 4050),
+        ('round_f', 4380),
+        ('round_v', 4257),
+        ('tilted_line', 4314),
+    ])
+    def test_num_rows(self, identifier, size):
+        assembly = load_dataset(f"Ferguson2024_{identifier}")
+        assert len(assembly) == size
+
+
+# testing training sets
+@pytest.mark.private_access
+class TestTrainingStimulusSets:
+    # test stimulus_set data:
+    @pytest.mark.parametrize('identifier', [
+        'circle_line', 'color', 'convergence', 'eighth',
+        'gray_easy', 'gray_hard', 'half', 'juncture',
+        'lle', 'llh', 'quarter', 'round_f',
+        'round_v', 'tilted_line'
+    ])
+    def test_stimulus_set_exist(self, identifier):
+        stimulus_set = load_stimulus_set(f"Ferguson2024_{identifier}_training_stimuli")
+        assert stimulus_set is not None
+        assert stimulus_set.identifier == f"Ferguson2024_{identifier}_training_stimuli"
+
+    # test the number of images
+    @pytest.mark.parametrize('identifier', [
+        'circle_line', 'color', 'convergence', 'eighth',
+        'gray_easy', 'gray_hard', 'half', 'juncture',
+        'lle', 'llh', 'quarter', 'round_f',
+        'round_v', 'tilted_line'
+    ])
+    def test_num_images(self, identifier):
+        stimulus_set = load_stimulus_set(f"Ferguson2024_{identifier}_training_stimuli")
+        assert len(np.unique(stimulus_set['stimulus_id'].values)) == 1920
+
+    # test the number of blocks:
+    @pytest.mark.parametrize('identifier', [
+        'circle_line', 'color', 'convergence', 'eighth',
+        'gray_easy', 'gray_hard', 'half', 'juncture',
+        'lle', 'llh', 'quarter', 'round_f',
+        'round_v', 'tilted_line'
+    ])
+    def
test_num_blocks(self, identifier): + stimulus_set = load_stimulus_set(f"Ferguson2024_{identifier}_training_stimuli") + assert len(np.unique(stimulus_set['block'].values)) == 2 From 7f99883a023b43c2bbee321a42ad25be85776d36 Mon Sep 17 00:00:00 2001 From: YingtianDt <90408985+YingtianDt@users.noreply.github.com> Date: Wed, 26 Jun 2024 11:50:49 +0200 Subject: [PATCH 10/68] Add more temporal models (#924) * feature: support temporal models for neural alignment by chaning TemporalIgnore to Temporal Aligned * add example temporal submission * complete new framework * new module: temporal model helpers * change the arch of temporal; add tutorials * improve: better naming * update: wrapper tutorial on brain model * add feature: inferencer identifier tracked by extractor for result caching * fix: video fps sampling; need more tests! * fix bugs: video sampling based on fps was wrong. * add mmaction2 models; add more features to the inferencers * PR: temporal model helpers * PR fix: not including gitmodules for now * Update brainscore_vision/model_helpers/brain_transformation/temporal.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/model_helpers/brain_transformation/temporal.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/model_helpers/brain_transformation/temporal.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/models/temporal_models/test.py Co-authored-by: Martin Schrimpf * add mae_st; add ding2012 * try new arch * init ding2012 * add tests for temporal model helpers; add block inferencer * Delete tests/test_model_helpers/temporal/test___init__.py delete the old test * add benchmark ding2012 * add mutliple libs for temporal models * change executor output format; add more inference tests; init load_weight in s3 * add openstl * update backend for executor * feat:load_weight_file and corresponding test * change:resize strategy changed from bilinear to pooling * change:resize strategy changed from bilinear to pooling * fix mae_st submission * minor * fix:dtype in assembly time align * minor * update model submissions * fix dependency * refactor: simplify the inferencer methods * fix:block inferencer, neuroid coord while merging * fix:inferencer identifier * fix:weigh download * change tests to have max_workers=1 * revert screen.py * not submit region_layer_map * remove torch dependency * make fake modules in tests * add torch to requirements; avoid torch in tests * minor * minor * np.object changed to object * remove return in tests * fix insertion position bug * Apply suggestions from code review add: more type hints Co-authored-by: Martin Schrimpf * add: more type hints and comments * minor * pr:only commit temporal model helpers * pr: add one model for example * undo whole_brain in Brainodel.RecordingTarget * use logger and fix newlines * fix: video fps with copy was wrong * feat:fractional max_spatial_size * downsample layers in VideoMAE * fix:video sampling wrong duration * add more tests * fix merge * fix merge * module refactor; add more input test * add more temporal models * fix videomaev2 sha * fix:temporal_modelmae_st * change:video conservative loading; rename:image to pil image * fix:video last frame sampling; fix_time_naming * ignore pytest_cache * re-trigger tests * add joblib pool error management; fix video/image path recognizer * update: naming of failed to pickle func in joblibmapper --------- Co-authored-by: Yingtian Tang Co-authored-by: Martin Schrimpf Co-authored-by: Martin Schrimpf Co-authored-by: deirdre-k 
<95875723+deirdre-k@users.noreply.github.com> --- .gitignore | 1 + .../activations/temporal/core/executor.py | 11 +- .../activations/temporal/inputs/base.py | 4 +- .../activations/temporal/inputs/image.py | 11 +- .../activations/temporal/inputs/video.py | 5 +- .../activations/temporal/utils.py | 4 +- .../brain_transformation/temporal.py | 2 +- .../temporal_model_AVID-CMA/__init__.py | 17 ++ .../models/temporal_model_AVID-CMA/model.py | 92 +++++++ .../temporal_model_AVID-CMA/requirements.txt | 3 + .../models/temporal_model_AVID-CMA/test.py | 18 ++ .../models/temporal_model_GDT/__init__.py | 16 ++ .../models/temporal_model_GDT/model.py | 72 ++++++ .../temporal_model_GDT/requirements.txt | 3 + .../models/temporal_model_GDT/test.py | 17 ++ .../temporal_model_S3D_text_video/__init__.py | 14 ++ .../temporal_model_S3D_text_video/model.py | 65 +++++ .../requirements.txt | 1 + .../temporal_model_S3D_text_video/test.py | 15 ++ .../models/temporal_model_SeLaVi/__init__.py | 17 ++ .../models/temporal_model_SeLaVi/model.py | 68 ++++++ .../temporal_model_SeLaVi/requirements.txt | 3 + .../models/temporal_model_SeLaVi/test.py | 18 ++ .../temporal_model_VideoMAEv2/__init__.py | 14 ++ .../models/temporal_model_VideoMAEv2/model.py | 109 +++++++++ .../requirements.txt | 4 + .../models/temporal_model_VideoMAEv2/test.py | 16 ++ .../models/temporal_model_mae_st/__init__.py | 15 ++ .../models/temporal_model_mae_st/model.py | 120 ++++++++++ .../temporal_model_mae_st/requirements.txt | 3 + .../models/temporal_model_mae_st/test.py | 16 ++ .../temporal_model_mmaction2/__init__.py | 23 ++ .../temporal_model_mmaction2/mmaction2.csv | 24 ++ .../models/temporal_model_mmaction2/model.py | 226 ++++++++++++++++++ .../temporal_model_mmaction2/requirements.txt | 5 + .../models/temporal_model_mmaction2/test.py | 24 ++ .../models/temporal_model_openstl/__init__.py | 19 ++ .../models/temporal_model_openstl/model.py | 223 +++++++++++++++++ .../temporal_model_openstl/requirements.txt | 3 + .../models/temporal_model_openstl/test.py | 20 ++ .../temporal_model_torchvision/__init__.py | 19 ++ .../temporal_model_torchvision/model.py | 92 +++++++ .../requirements.txt | 2 + .../models/temporal_model_torchvision/test.py | 20 ++ .../temporal/activations/test_inputs.py | 1 + 45 files changed, 1463 insertions(+), 12 deletions(-) create mode 100644 brainscore_vision/models/temporal_model_AVID-CMA/__init__.py create mode 100644 brainscore_vision/models/temporal_model_AVID-CMA/model.py create mode 100644 brainscore_vision/models/temporal_model_AVID-CMA/requirements.txt create mode 100644 brainscore_vision/models/temporal_model_AVID-CMA/test.py create mode 100644 brainscore_vision/models/temporal_model_GDT/__init__.py create mode 100644 brainscore_vision/models/temporal_model_GDT/model.py create mode 100644 brainscore_vision/models/temporal_model_GDT/requirements.txt create mode 100644 brainscore_vision/models/temporal_model_GDT/test.py create mode 100644 brainscore_vision/models/temporal_model_S3D_text_video/__init__.py create mode 100644 brainscore_vision/models/temporal_model_S3D_text_video/model.py create mode 100644 brainscore_vision/models/temporal_model_S3D_text_video/requirements.txt create mode 100644 brainscore_vision/models/temporal_model_S3D_text_video/test.py create mode 100644 brainscore_vision/models/temporal_model_SeLaVi/__init__.py create mode 100644 brainscore_vision/models/temporal_model_SeLaVi/model.py create mode 100644 brainscore_vision/models/temporal_model_SeLaVi/requirements.txt create mode 100644 
brainscore_vision/models/temporal_model_SeLaVi/test.py create mode 100644 brainscore_vision/models/temporal_model_VideoMAEv2/__init__.py create mode 100644 brainscore_vision/models/temporal_model_VideoMAEv2/model.py create mode 100644 brainscore_vision/models/temporal_model_VideoMAEv2/requirements.txt create mode 100644 brainscore_vision/models/temporal_model_VideoMAEv2/test.py create mode 100644 brainscore_vision/models/temporal_model_mae_st/__init__.py create mode 100644 brainscore_vision/models/temporal_model_mae_st/model.py create mode 100644 brainscore_vision/models/temporal_model_mae_st/requirements.txt create mode 100644 brainscore_vision/models/temporal_model_mae_st/test.py create mode 100644 brainscore_vision/models/temporal_model_mmaction2/__init__.py create mode 100644 brainscore_vision/models/temporal_model_mmaction2/mmaction2.csv create mode 100644 brainscore_vision/models/temporal_model_mmaction2/model.py create mode 100644 brainscore_vision/models/temporal_model_mmaction2/requirements.txt create mode 100644 brainscore_vision/models/temporal_model_mmaction2/test.py create mode 100644 brainscore_vision/models/temporal_model_openstl/__init__.py create mode 100644 brainscore_vision/models/temporal_model_openstl/model.py create mode 100644 brainscore_vision/models/temporal_model_openstl/requirements.txt create mode 100644 brainscore_vision/models/temporal_model_openstl/test.py create mode 100644 brainscore_vision/models/temporal_model_torchvision/__init__.py create mode 100644 brainscore_vision/models/temporal_model_torchvision/model.py create mode 100644 brainscore_vision/models/temporal_model_torchvision/requirements.txt create mode 100644 brainscore_vision/models/temporal_model_torchvision/test.py diff --git a/.gitignore b/.gitignore index a75baa9b4..5b7fe8320 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ venv .venv build .DS_Store +.pytest_cache # Model Weights *.pt diff --git a/brainscore_vision/model_helpers/activations/temporal/core/executor.py b/brainscore_vision/model_helpers/activations/temporal/core/executor.py index fbf9adcdb..9670a85ff 100644 --- a/brainscore_vision/model_helpers/activations/temporal/core/executor.py +++ b/brainscore_vision/model_helpers/activations/temporal/core/executor.py @@ -25,9 +25,16 @@ class JoblibMapper: def __init__(self, num_threads: int): self._num_threads = num_threads self._pool = Parallel(n_jobs=num_threads, verbose=False, backend="loky") + self._failed_to_pickle_func = False def map(self, func, *data): - return self._pool(delayed(func)(*x) for x in zip(*data)) + from joblib.externals.loky.process_executor import TerminatedWorkerError, BrokenProcessPool + if not self._failed_to_pickle_func: + try: + return self._pool(delayed(func)(*x) for x in zip(*data)) + except (TerminatedWorkerError, BrokenProcessPool): + self._failed_to_pickle_func = True + return [func(*x) for x in zip(*data)] class BatchExecutor: @@ -209,4 +216,4 @@ def execute(self, layers): layer_activations[layer] = [activations[i] for i in indices] self.clear_stimuli() - return layer_activations \ No newline at end of file + return layer_activations diff --git a/brainscore_vision/model_helpers/activations/temporal/inputs/base.py b/brainscore_vision/model_helpers/activations/temporal/inputs/base.py index dea5c86d3..d656a86b7 100644 --- a/brainscore_vision/model_helpers/activations/temporal/inputs/base.py +++ b/brainscore_vision/model_helpers/activations/temporal/inputs/base.py @@ -8,11 +8,11 @@ def from_path(self, path): @staticmethod def is_video_path(path: 
Union[str, Path]) -> bool:
-        extension = path.split('.')[-1]
+        extension = path.split('.')[-1].lower()
         return extension in ['mp4', 'avi', 'mov', 'flv', 'wmv', 'webm', 'mkv', 'gif']
 
     @staticmethod
     def is_image_path(path: Union[str, Path]) -> bool:
-        extension = path.split('.')[-1]
+        extension = path.split('.')[-1].lower()
         return extension in ['jpg', 'jpeg', 'png', 'bmp', 'tiff']
\ No newline at end of file
diff --git a/brainscore_vision/model_helpers/activations/temporal/inputs/image.py b/brainscore_vision/model_helpers/activations/temporal/inputs/image.py
index adb85d559..6d7790ac1 100644
--- a/brainscore_vision/model_helpers/activations/temporal/inputs/image.py
+++ b/brainscore_vision/model_helpers/activations/temporal/inputs/image.py
@@ -25,18 +25,25 @@ def set_size(self, size):
     def from_path(path):
         return Image(path, get_image_size(path))
 
-    def to_img(self):
+    def to_pil_img(self):
         return PILImage.fromarray(self.to_numpy())
+
+    def get_frame(self):
+        return np.array(PILImage.open(self._path).convert('RGB'))  # return (H, W, C[RGB])
 
     def to_numpy(self):
-        arr = np.array(PILImage.open(self._path).convert('RGB'))
+        arr = self.get_frame()
         if arr.shape[:2][::-1] != self._size:
             arr = batch_2d_resize(arr[None,:], self._size, "bilinear")[0]
         return arr
 
+    def store_to_path(self, path):
+        self.to_pil_img().save(path)  # to_img was renamed to to_pil_img above
+        return path
+
 
 def get_image_size(path):
     with PILImage.open(path) as img:
         size = img.size
diff --git a/brainscore_vision/model_helpers/activations/temporal/inputs/video.py b/brainscore_vision/model_helpers/activations/temporal/inputs/video.py
index 81a2b5b57..d8287a4a7 100644
--- a/brainscore_vision/model_helpers/activations/temporal/inputs/video.py
+++ b/brainscore_vision/model_helpers/activations/temporal/inputs/video.py
@@ -120,7 +120,6 @@ def get_frames(self, indices):
     ### I/O
     def from_path(path):
-        path = path
         fps, end, size = get_video_stats(path)
         start = 0
         return Video(path, fps, start, end, size)
@@ -139,7 +138,7 @@ def to_numpy(self):
         sample_indices = samples.astype(int)
 
         # padding: repeat the first/last frame
-        original_num_frames = int(self._original_duration * self._original_fps/1000 + EPS)
+        original_num_frames = int(self._original_duration * self._original_fps/1000 - EPS)  # EPS to avoid last frame OOB error
         sample_indices = np.clip(sample_indices, 0, original_num_frames-1)
 
         # actual sampling
@@ -156,7 +155,7 @@ def to_frames(self):
 
     def to_pil_imgs(self):
         return [PILImage.fromarray(frame) for frame in self.to_numpy()]
-    
+
     def to_path(self):
         # use context manager ?
path = None # make a temporal file diff --git a/brainscore_vision/model_helpers/activations/temporal/utils.py b/brainscore_vision/model_helpers/activations/temporal/utils.py index 821c63cb2..d1f7a264c 100644 --- a/brainscore_vision/model_helpers/activations/temporal/utils.py +++ b/brainscore_vision/model_helpers/activations/temporal/utils.py @@ -43,7 +43,7 @@ def stack_with_nan_padding_(arr_list, axis=0, dtype=np.float16): return result -def stack_with_nan_padding(arr_list, axis=0, dtype=np.float16): +def stack_with_nan_padding(arr_list, axis=0, dtype=None): # Get shapes of all arrays shapes = [np.array(arr.shape) for arr in arr_list] max_shape = np.max(shapes, axis=0) @@ -58,7 +58,7 @@ def stack_with_nan_padding(arr_list, axis=0, dtype=np.float16): result = np.stack(results, axis=axis) result = np.swapaxes(result, 0, axis) - if result.dtype != dtype: + if dtype is not None and result.dtype != dtype: result = result.astype(dtype) return result diff --git a/brainscore_vision/model_helpers/brain_transformation/temporal.py b/brainscore_vision/model_helpers/brain_transformation/temporal.py index a10770f54..601b34530 100644 --- a/brainscore_vision/model_helpers/brain_transformation/temporal.py +++ b/brainscore_vision/model_helpers/brain_transformation/temporal.py @@ -170,13 +170,13 @@ def look_at(self, stimuli, number_of_trials=1): bin_responses = bin_responses.stack(time_bin=['time_bin_start', 'time_bin_end']) time_responses.append(bin_responses) responses = merge_data_arrays(time_responses) + responses = fix_timebin_naming(responses) else: # for temporal models, align the time bins responses = assembly_time_align(responses, self._time_bins) if len(self._time_bins) == 1: responses = responses.squeeze('time_bin') - responses = fix_timebin_naming(responses) return responses @property diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/__init__.py b/brainscore_vision/models/temporal_model_AVID-CMA/__init__.py new file mode 100644 index 000000000..91668400e --- /dev/null +++ b/brainscore_vision/models/temporal_model_AVID-CMA/__init__.py @@ -0,0 +1,17 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from brainscore_vision.model_helpers.activations.temporal.utils import get_specified_layers +from brainscore_vision.model_interface import BrainModel +from . 
import model + + +def commit_model(identifier): + activations_model=model.get_model(identifier) + layers=get_specified_layers(activations_model) + return ModelCommitment(identifier=identifier, activations_model=activations_model, layers=layers) + + +model_registry["AVID-CMA-Kinetics400"] = lambda: commit_model("AVID-CMA-Kinetics400") +model_registry["AVID-CMA-Audioset"] = lambda: commit_model("AVID-CMA-Audioset") +model_registry["AVID-Kinetics400"] = lambda: commit_model("AVID-Kinetics400") +model_registry["AVID-Audioset"] = lambda: commit_model("AVID-Audioset") diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/model.py b/brainscore_vision/models/temporal_model_AVID-CMA/model.py new file mode 100644 index 000000000..60d91f690 --- /dev/null +++ b/brainscore_vision/models/temporal_model_AVID-CMA/model.py @@ -0,0 +1,92 @@ +import yaml +import os + +import torch + +import avid_cma +from avid_cma.utils.logger import Logger +from avid_cma.utils import main_utils +from avid_cma.datasets import preprocessing + +from brainscore_vision.model_helpers.activations.temporal.model import PytorchWrapper +from brainscore_vision.model_helpers.s3 import load_weight_file + + +HOME = os.path.dirname(os.path.abspath(avid_cma.__file__)) + +def get_model(identifier): + + if identifier == 'AVID-CMA-Kinetics400': + cfg_path = os.path.join(HOME, "configs/main/avid-cma/kinetics/InstX-N1024-PosW-N64-Top32.yaml") + weight_path = load_weight_file( + bucket="brainscore-vision", + relative_path="temporal_model_AVID-CMA/AVID-CMA_Kinetics_InstX-N1024-PosW-N64-Top32_checkpoint.pth.tar", + version_id="yx9Pbq3SuNOOd4sX7csTolaHD1iTCx8y", + sha1="6efe4464ca654a56affff766acf24e89e6f3ffbf" + ) + + elif identifier == 'AVID-CMA-Audioset': + cfg_path = os.path.join(HOME, "configs/main/avid-cma/audioset/InstX-N1024-PosW-N64-Top32.yaml") + weight_path = load_weight_file( + bucket="brainscore-vision", + relative_path="temporal_model_AVID-CMA/AVID-CMA_Audioset_InstX-N1024-PosW-N64-Top32_checkpoint.pth.tar", + version_id="jSaZgbUohM0ZeoEUUKZiLBo6iz_v8VvQ", + sha1="9db5eba9aab6bdbb74025be57ab532df808fe3f6" + ) + + elif identifier == 'AVID-Kinetics400': + cfg_path = os.path.join(HOME, "configs/main/avid/kinetics/Cross-N1024.yaml") + weight_path = load_weight_file( + bucket="brainscore-vision", + relative_path="temporal_model_AVID-CMA/AVID_Kinetics_Cross-N1024_checkpoint.pth.tar", + version_id="XyKt0UOUFsuuyrl6ZREivK8FadRPx34u", + sha1="d3a04f856d29421ba8de37808593a3fad4d4794f" + ) + + elif identifier == 'AVID-Audioset': + cfg_path = os.path.join(HOME, "configs/main/avid/audioset/Cross-N1024.yaml") + weight_path = load_weight_file( + bucket="brainscore-vision", + relative_path="temporal_model_AVID-CMA/AVID_Audioset_Cross-N1024_checkpoint.pth.tar", + version_id="0Sxuhn8LsYXQC4FnPfJ7rw7uU6kDlKgc", + sha1="b48d8428a1a2526ccca070f810333df18bfce5fd" + ) + + else: + raise ValueError(f"Unknown model identifier: {identifier}") + + + cfg = yaml.safe_load(open(cfg_path)) + cfg['model']['args']['checkpoint'] = weight_path + logger = Logger() + + # Define model + model = main_utils.build_model(cfg['model'], logger) + + # take only video model + model = model.video_model + + # Define dataloaders + db_cfg = cfg['dataset'] + print(db_cfg) + + num_frames = int(db_cfg['video_clip_duration'] * db_cfg['video_fps']) + + _video_transform = preprocessing.VideoPrep_Crop_CJ( + resize=(256, 256), + crop=(db_cfg['crop_size'], db_cfg['crop_size']), + augment=False, + num_frames=num_frames, + pad_missing=True, + ) + + def video_transform(video): + frames = 
video.to_pil_imgs() + return _video_transform(frames) + + layer_activation_format = { + 'conv1': 'CTHW', + **{f"conv{i}x": 'CTHW' for i in range(2, 6)}, + } + + return PytorchWrapper(identifier, model, video_transform, fps=db_cfg['video_fps'], layer_activation_format=layer_activation_format) \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/requirements.txt b/brainscore_vision/models/temporal_model_AVID-CMA/requirements.txt new file mode 100644 index 000000000..47cc15207 --- /dev/null +++ b/brainscore_vision/models/temporal_model_AVID-CMA/requirements.txt @@ -0,0 +1,3 @@ +avid_cma @ git+https://github.com/YingtianDt/AVID-CMA.git +torch +torchvision \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/test.py b/brainscore_vision/models/temporal_model_AVID-CMA/test.py new file mode 100644 index 000000000..d775f732d --- /dev/null +++ b/brainscore_vision/models/temporal_model_AVID-CMA/test.py @@ -0,0 +1,18 @@ +import pytest + +from brainscore_vision import load_model + + +model_list = [ + "AVID-CMA-Kinetics400", + "AVID-CMA-Audioset", + "AVID-Kinetics400", + "AVID-Audioset" +] + +@pytest.mark.private_access +@pytest.mark.memory_intense +@pytest.mark.parametrize("model_identifier", model_list) +def test_load(model_identifier): + model = load_model(model_identifier) + assert model is not None \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_GDT/__init__.py b/brainscore_vision/models/temporal_model_GDT/__init__.py new file mode 100644 index 000000000..29d479d31 --- /dev/null +++ b/brainscore_vision/models/temporal_model_GDT/__init__.py @@ -0,0 +1,16 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from brainscore_vision.model_helpers.activations.temporal.utils import get_specified_layers +from brainscore_vision.model_interface import BrainModel +from . 
import model + + +def commit_model(identifier): + activations_model=model.get_model(identifier) + layers=get_specified_layers(activations_model) + return ModelCommitment(identifier=identifier, activations_model=activations_model, layers=layers) + + +model_registry["GDT-Kinetics400"] = lambda: commit_model("GDT-Kinetics400") +model_registry["GDT-HowTo100M"] = lambda: commit_model("GDT-HowTo100M") +model_registry["GDT-IG65M"] = lambda: commit_model("GDT-IG65M") diff --git a/brainscore_vision/models/temporal_model_GDT/model.py b/brainscore_vision/models/temporal_model_GDT/model.py new file mode 100644 index 000000000..624a5b29b --- /dev/null +++ b/brainscore_vision/models/temporal_model_GDT/model.py @@ -0,0 +1,72 @@ +import torch + +from gdt_model.model import GDT +from gdt_model.video_transforms import clip_augmentation + +from brainscore_vision.model_helpers.activations.temporal.model import PytorchWrapper +from brainscore_vision.model_helpers.s3 import load_weight_file + + +def transform_video(video): + arr = video.to_numpy() + arr = torch.as_tensor(arr) + return clip_augmentation(arr) + + +def get_model(identifier): + + assert identifier.startswith("GDT-") + dataset = "-".join(identifier.split("-")[1:]) + + if dataset == "Kinetics400": + pth = load_weight_file( + bucket="brainscore-vision", + relative_path="temporal_model_GDT/gdt_K400.pth", + version_id="JpU_tnCzrbTejn6sOrQMk8eRsJ97yFgt", + sha1="7f12c60670346b1aab15194eb44c341906e1bca6" + ) + elif dataset == "IG65M": + pth = load_weight_file( + bucket="brainscore-vision", + relative_path="temporal_model_GDT/gdt_IG65M.pth", + version_id="R.NoD6VAbFbJdf8tg5jnXIWB3hQ8GlSD", + sha1="3dcee3af61691e1e7e47e4b115be6808f4ea8172" + ) + elif dataset == "HowTo100M": + pth = load_weight_file( + bucket="brainscore-vision", + relative_path="temporal_model_GDT/gdt_HT100M.pth", + version_id="BVRl9t_134PoKZCn9W54cyfkImCW2ioq", + sha1="a9a979c82e83b955794814923af736eb34e6f080" + ) + else: + raise ValueError(f"Unknown dataset: {dataset}") + + # Load model + model = GDT( + vid_base_arch="r2plus1d_18", + aud_base_arch="resnet9", + pretrained=False, + norm_feat=False, + use_mlp=False, + num_classes=256, + ) + + model = model.video_network # Remove audio network + + # Load weights + state_dict_ = torch.load(pth, map_location="cpu")['model'] + state_dict = {} + for k, v in list(state_dict_.items()): + if k.startswith("video_network."): + k = k[len("video_network."):] + state_dict[k] = v + model.load_state_dict(state_dict) + + layer_activation_format = { + "base.stem": "CTHW", + **{f"base.layer{i}": "CTHW" for i in range(1, 5)}, + # "base.fc": "C", # no fc + } + + return PytorchWrapper(identifier, model, transform_video, fps=30, layer_activation_format=layer_activation_format) \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_GDT/requirements.txt b/brainscore_vision/models/temporal_model_GDT/requirements.txt new file mode 100644 index 000000000..35ea5ddb9 --- /dev/null +++ b/brainscore_vision/models/temporal_model_GDT/requirements.txt @@ -0,0 +1,3 @@ +gdt_model @ git+https://github.com/YingtianDt/GDT.git +torch +torchvision \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_GDT/test.py b/brainscore_vision/models/temporal_model_GDT/test.py new file mode 100644 index 000000000..f3092c785 --- /dev/null +++ b/brainscore_vision/models/temporal_model_GDT/test.py @@ -0,0 +1,17 @@ +import pytest + +from brainscore_vision import load_model + + +model_list = [ + "GDT-Kinetics400", + "GDT-HowTo100M", + 
"GDT-IG65M", +] + +@pytest.mark.private_access +@pytest.mark.memory_intense +@pytest.mark.parametrize("model_identifier", model_list) +def test_load(model_identifier): + model = load_model(model_identifier) + assert model is not None \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_S3D_text_video/__init__.py b/brainscore_vision/models/temporal_model_S3D_text_video/__init__.py new file mode 100644 index 000000000..5b7c12472 --- /dev/null +++ b/brainscore_vision/models/temporal_model_S3D_text_video/__init__.py @@ -0,0 +1,14 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from brainscore_vision.model_helpers.activations.temporal.utils import get_specified_layers +from brainscore_vision.model_interface import BrainModel +from . import model + + +def commit_model(identifier): + activations_model=model.get_model(identifier) + layers=get_specified_layers(activations_model) + return ModelCommitment(identifier=identifier, activations_model=activations_model, layers=layers) + + +model_registry["s3d-HowTo100M"] = lambda: commit_model("s3d-HowTo100M") diff --git a/brainscore_vision/models/temporal_model_S3D_text_video/model.py b/brainscore_vision/models/temporal_model_S3D_text_video/model.py new file mode 100644 index 000000000..d463caf9b --- /dev/null +++ b/brainscore_vision/models/temporal_model_S3D_text_video/model.py @@ -0,0 +1,65 @@ +import torch +import numpy as np +from torchvision import transforms +from s3dg_howto100m import S3D + +from brainscore_vision.model_helpers.activations.temporal.model.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.s3 import load_weight_file + + +img_transform = transforms.Compose([ + transforms.Resize((256, 256)), +]) + +def transform_video(video): + frames = video.to_numpy() / 255. 
+ frames = torch.Tensor(frames) + frames = frames.permute(0, 3, 1, 2) + frames = img_transform(frames) + return frames.permute(1, 0, 2, 3) + + +def get_model(identifier="s3d-HowTo100M"): + inferencer_kwargs = { + "fps": 24, # common YouTube frame rate + "layer_activation_format": + { + "conv1": "CTHW", + "conv_2c": "CTHW", + "mixed_3c": "CTHW", + "mixed_4b": "CTHW", + "mixed_4d": "CTHW", + "mixed_4f": "CTHW", + "mixed_5c": "CTHW", + "fc": "C" + }, + } + process_output = None + + model_name = identifier + + model_pth = load_weight_file( + bucket="brainscore-vision", + relative_path="temporal_model_S3D_text_video/s3d_howto100m.pth", + version_id="hRp6I8bpwreIMUVL0H.zCdK0hqRggL7n", + sha1="31e99d2a1cd48f2259ca75e719ac82c8b751ea75" + ) + + dict_pth = load_weight_file( + bucket="brainscore-vision", + relative_path="temporal_model_S3D_text_video/s3d_dict.npy", + version_id="4NxVLe8DSL6Uue0F7e2rz8HZuOk.tkBI", + sha1="d368ff7d397ec8240f1f963b5efe8ff245bac35f" + ) + + # Instantiate the model + model = S3D(dict_pth, 512) + + # Load the model weights + model.load_state_dict(torch.load(model_pth)) + + wrapper = PytorchWrapper(identifier, model, transform_video, + process_output=process_output, + **inferencer_kwargs) + + return wrapper \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_S3D_text_video/requirements.txt b/brainscore_vision/models/temporal_model_S3D_text_video/requirements.txt new file mode 100644 index 000000000..73f27f3b6 --- /dev/null +++ b/brainscore_vision/models/temporal_model_S3D_text_video/requirements.txt @@ -0,0 +1 @@ +S3D_HowTo100M @ git+https://github.com/YingtianDt/S3D_HowTo100M \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_S3D_text_video/test.py b/brainscore_vision/models/temporal_model_S3D_text_video/test.py new file mode 100644 index 000000000..e6c7fdb18 --- /dev/null +++ b/brainscore_vision/models/temporal_model_S3D_text_video/test.py @@ -0,0 +1,15 @@ +import pytest + +from brainscore_vision import load_model + + +model_list = [ + "s3d-HowTo100M", +] + +@pytest.mark.private_access +@pytest.mark.memory_intense +@pytest.mark.parametrize("model_identifier", model_list) +def test_load(model_identifier): + model = load_model(model_identifier) + assert model is not None \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_SeLaVi/__init__.py b/brainscore_vision/models/temporal_model_SeLaVi/__init__.py new file mode 100644 index 000000000..68f5deecf --- /dev/null +++ b/brainscore_vision/models/temporal_model_SeLaVi/__init__.py @@ -0,0 +1,17 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from brainscore_vision.model_helpers.activations.temporal.utils import get_specified_layers +from brainscore_vision.model_interface import BrainModel +from . 
import model + + +def commit_model(identifier): + activations_model=model.get_model(identifier) + layers=get_specified_layers(activations_model) + return ModelCommitment(identifier=identifier, activations_model=activations_model, layers=layers) + + +model_registry["SeLaVi-Kinetics400"] = lambda: commit_model("SeLaVi-Kinetics400") +model_registry["SeLaVi-Kinetics-Sound"] = lambda: commit_model("SeLaVi-Kinetics-Sound") +model_registry["SeLaVi-VGG-Sound"] = lambda: commit_model("SeLaVi-VGG-Sound") +model_registry["SeLaVi-AVE"] = lambda: commit_model("SeLaVi-AVE") diff --git a/brainscore_vision/models/temporal_model_SeLaVi/model.py b/brainscore_vision/models/temporal_model_SeLaVi/model.py new file mode 100644 index 000000000..d6c34eb02 --- /dev/null +++ b/brainscore_vision/models/temporal_model_SeLaVi/model.py @@ -0,0 +1,68 @@ +import torch + +from selavi.model import load_model +from selavi.video_transforms import clip_augmentation + +from brainscore_vision.model_helpers.activations.temporal.model import PytorchWrapper +from brainscore_vision.model_helpers.activations.temporal.utils import download_weight_file + + +def transform_video(video): + arr = video.to_numpy() + arr = torch.as_tensor(arr) + return clip_augmentation(arr) + + +def get_model(identifier): + + assert identifier.startswith("SeLaVi-") + dataset = "-".join(identifier.split("-")[1:]) + + if dataset == "Kinetics400": + model_name = "selavi_kinetics.pth" + num_classes = 400 + elif dataset == "Kinetics-Sound": + model_name = "selavi_kinetics_sound.pth" + num_classes = 32 + elif dataset == "VGG-Sound": + model_name = "selavi_vgg_sound.pth" + num_classes = 309 + elif dataset == "AVE": + model_name = "selavi_ave.pth" + num_classes = 28 + else: + raise ValueError(f"Unknown dataset: {dataset}") + + url = f"https://dl.fbaipublicfiles.com/selavi/{model_name}" + pth = download_weight_file(url, folder="temporal_model_SeLaVi") + + # Load model + model = load_model( + vid_base_arch="r2plus1d_18", + aud_base_arch="resnet9", + use_mlp=True, + num_classes=num_classes, + pretrained=False, + norm_feat=False, + use_max_pool=False, + headcount=10, + ) + + model = model.video_network # Remove audio network + + # Load weights + state_dict_ = torch.load(pth, map_location="cpu")['model'] + state_dict = {} + for k, v in list(state_dict_.items()): + if k.startswith("module.video_network."): + k = k[len("module.video_network."):] + state_dict[k] = v + model.load_state_dict(state_dict) + + layer_activation_format = { + "base.stem": "CTHW", + **{f"base.layer{i}": "CTHW" for i in range(1, 5)}, + # "base.fc": "C", # no fc + } + + return PytorchWrapper(identifier, model, transform_video, fps=30, layer_activation_format=layer_activation_format) \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_SeLaVi/requirements.txt b/brainscore_vision/models/temporal_model_SeLaVi/requirements.txt new file mode 100644 index 000000000..ecc9cdae6 --- /dev/null +++ b/brainscore_vision/models/temporal_model_SeLaVi/requirements.txt @@ -0,0 +1,3 @@ +torch +torchvision +selavi @ git+https://github.com/YingtianDt/selavi.git \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_SeLaVi/test.py b/brainscore_vision/models/temporal_model_SeLaVi/test.py new file mode 100644 index 000000000..1ad8c439c --- /dev/null +++ b/brainscore_vision/models/temporal_model_SeLaVi/test.py @@ -0,0 +1,18 @@ +import pytest + +from brainscore_vision import load_model + + +model_list = [ + "SeLaVi-Kinetics400", + "SeLaVi-Kinetics-Sound", + 
"SeLaVi-VGG-Sound", + "SeLaVi-AVE" +] + +@pytest.mark.private_access +@pytest.mark.memory_intense +@pytest.mark.parametrize("model_identifier", model_list) +def test_load(model_identifier): + model = load_model(model_identifier) + assert model is not None \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_VideoMAEv2/__init__.py b/brainscore_vision/models/temporal_model_VideoMAEv2/__init__.py new file mode 100644 index 000000000..1579fc0e4 --- /dev/null +++ b/brainscore_vision/models/temporal_model_VideoMAEv2/__init__.py @@ -0,0 +1,14 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from brainscore_vision.model_helpers.activations.temporal.utils import get_specified_layers +from brainscore_vision.model_interface import BrainModel +from . import model + + +def commit_model(identifier): + activations_model=model.get_model(identifier) + layers=get_specified_layers(activations_model) + return ModelCommitment(identifier=identifier, activations_model=activations_model, layers=layers) + +model_registry["VideoMAE-V2-B"] = lambda: commit_model("VideoMAE-V2-B") +model_registry["VideoMAE-V2-G"] = lambda: commit_model("VideoMAE-V2-G") diff --git a/brainscore_vision/models/temporal_model_VideoMAEv2/model.py b/brainscore_vision/models/temporal_model_VideoMAEv2/model.py new file mode 100644 index 000000000..7e785513e --- /dev/null +++ b/brainscore_vision/models/temporal_model_VideoMAEv2/model.py @@ -0,0 +1,109 @@ + +import numpy as np +import torch +from timm.models import create_model +from torchvision import transforms + +# NOTE: Do not comment `import models`, it is used to register models +from videomae_v2 import * # noqa: F401 + +from brainscore_vision.model_helpers.activations.temporal.model import PytorchWrapper +from brainscore_vision.model_helpers.s3 import load_weight_file + + +LAYER_SELECT_STEP = 2 + +def to_normalized_float_tensor(vid): + vid = torch.Tensor(vid.to_numpy()) + return vid.permute(3, 0, 1, 2).to(torch.float32) / 255 + +def resize(vid, size, interpolation='bilinear'): + scale = None + if isinstance(size, int): + scale = float(size) / min(vid.shape[-2:]) + size = None + return torch.nn.functional.interpolate( + vid, + size=size, + scale_factor=scale, + mode=interpolation, + align_corners=False) + +class ToFloatTensorInZeroOne(object): + def __call__(self, vid): + return to_normalized_float_tensor(vid) + + +class Resize(object): + def __init__(self, size): + self.size = size + + def __call__(self, vid): + return resize(vid, self.size) + + +transform_video = transforms.Compose( + [ToFloatTensorInZeroOne(), + Resize((224, 224))]) + +def get_model(identifier): + + if identifier == "VideoMAE-V2-G": + model_name = "vit_giant_patch14_224" + pth = load_weight_file( + bucket="brainscore-vision", + relative_path="temporal_model_VideoMAEv2/vit_g_hybrid_pt_1200e.pth", + version_id="TxtkfbeMV105dzpzTwi0Kn5glnvQvIrq", + sha1="9048f2bc0b0c7ba4d0e5228f3a7c0bef4dbaca69", + ) + num_blocks = 40 + feature_map_size = 16 + elif identifier == "VideoMAE-V2-B": + model_name = "vit_base_patch16_224" + pth = load_weight_file( + bucket="brainscore-vision", + relative_path="temporal_model_VideoMAEv2/vit_b_hybrid_pt_800e.pth", + version_id="rRjpYq21dAQ5KaCLbEHK.YaLZ_fbMPKw", + sha1="1e3602691964b1eb6f7c33529119243a5b235635" + ) + num_blocks = 12 + feature_map_size = 14 + + num_frames = 16 + + model = create_model(model_name) + + ckpt = torch.load(pth, map_location='cpu') + for model_key in ['model', 
'module']: + if model_key in ckpt: + ckpt = ckpt[model_key] + break + + encoder_ckpt = {} + for k, v in ckpt.items(): + if k.startswith("encoder."): + encoder_ckpt[k[8:]] = v + + msg = model.load_state_dict(encoder_ckpt, strict=False) + print(msg) + + inferencer_kwargs = { + "fps": 6.25, + "layer_activation_format": { + "patch_embed": "THWC", + **{f"blocks.{i}": "THWC" for i in range(0, num_blocks, LAYER_SELECT_STEP)}, + # "head": "THWC" # weight not available + }, + "num_frames": num_frames, + } + + def process_activation(layer, layer_name, inputs, output): + B = output.shape[0] + C = output.shape[-1] + output = output.reshape(B, -1, feature_map_size, feature_map_size, C) + return output + + wrapper = PytorchWrapper(identifier, model, transform_video, + process_output=process_activation, + **inferencer_kwargs) + return wrapper diff --git a/brainscore_vision/models/temporal_model_VideoMAEv2/requirements.txt b/brainscore_vision/models/temporal_model_VideoMAEv2/requirements.txt new file mode 100644 index 000000000..5af2d95b6 --- /dev/null +++ b/brainscore_vision/models/temporal_model_VideoMAEv2/requirements.txt @@ -0,0 +1,4 @@ +torch +torchvision +timm +videomae_v2 @ git+https://github.com/YingtianDt/VideoMAEv2.git \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_VideoMAEv2/test.py b/brainscore_vision/models/temporal_model_VideoMAEv2/test.py new file mode 100644 index 000000000..905296c3a --- /dev/null +++ b/brainscore_vision/models/temporal_model_VideoMAEv2/test.py @@ -0,0 +1,16 @@ +import pytest + +from brainscore_vision import load_model + + +model_list = [ + "VideoMAE-V2-B", + "VideoMAE-V2-G", +] + +@pytest.mark.private_access +@pytest.mark.memory_intense +@pytest.mark.parametrize("model_identifier", model_list) +def test_load(model_identifier): + model = load_model(model_identifier) + assert model is not None \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_mae_st/__init__.py b/brainscore_vision/models/temporal_model_mae_st/__init__.py new file mode 100644 index 000000000..6eb152a20 --- /dev/null +++ b/brainscore_vision/models/temporal_model_mae_st/__init__.py @@ -0,0 +1,15 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from brainscore_vision.model_helpers.activations.temporal.utils import get_specified_layers +from brainscore_vision.model_interface import BrainModel +from . 
import model + + +def commit_model(identifier): + activations_model=model.get_model(identifier) + layers=get_specified_layers(activations_model) + return ModelCommitment(identifier=identifier, activations_model=activations_model, layers=layers) + + +model_registry["MAE-ST-L"] = lambda: commit_model("MAE-ST-L") +model_registry["MAE-ST-G"] = lambda: commit_model("MAE-ST-G") \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_mae_st/model.py b/brainscore_vision/models/temporal_model_mae_st/model.py new file mode 100644 index 000000000..6f45c6073 --- /dev/null +++ b/brainscore_vision/models/temporal_model_mae_st/model.py @@ -0,0 +1,120 @@ +import torch +from iopath.common.file_io import g_pathmgr as pathmgr +from mae_st import models_vit +from mae_st.models_vit import VisionTransformer, nn, partial +from mae_st.util import misc +from mae_st.util.pos_embed import interpolate_pos_embed + +from brainscore_vision.model_helpers.activations.temporal.model import PytorchWrapper +from brainscore_vision.model_helpers.s3 import load_weight_file + + +def vit_huge_patch16(**kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=1280, + depth=32, + num_heads=16, + mlp_ratio=4, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + return model + +models_vit.__dict__["vit_huge_patch16"] = vit_huge_patch16 + + +LAYER_SELECT_STEP = 2 +mean = (0.45, 0.45, 0.45) +std = (0.225, 0.225, 0.225) + +from torchvision import transforms + +transform_img = transforms.Compose([ + transforms.Resize((224, 224)), + transforms.Normalize(mean, std), +]) + + +def transform_video(video): + import torch + frames = torch.Tensor(video.to_numpy() / 255.0).permute(0, 3, 1, 2) + frames = transform_img(frames) + return frames.permute(1, 0, 2, 3) + + + +def get_model(identifier): + + if identifier == "MAE-ST-L": + model_name = "vit_large_patch16" + num_blocks = 24 + feature_map_size = 14 + load_path = load_weight_file( + bucket="brainscore-vision", + relative_path="temporal_model_mae_st/mae_pretrain_vit_large_k400.pth", + version_id="cPcP4AzpG95CimQ5Pn.CHKnGUJlLXM3m", + sha1="c7fb91864a4ddf8b99309440121a3abe66b846bb" + ) + + elif identifier == "MAE-ST-G": + model_name = "vit_huge_patch16" + num_blocks = 32 + feature_map_size = 14 + load_path = load_weight_file( + bucket="brainscore-vision", + relative_path="temporal_model_mae_st/mae_pretrain_vit_huge_k400.pth", + version_id="IYKa8QiocgBzo3EhsBouS62HboK6iqYT", + sha1="177e48577142ca01949c08254834ffa1198b9eb4" + ) + + num_frames = 16 + t_patch_size = 2 + + model = models_vit.__dict__[model_name]( + num_frames=num_frames, + t_patch_size=t_patch_size, + cls_embed=True, + sep_pos_embed=True + ) + + with pathmgr.open(load_path, "rb") as f: + checkpoint = torch.load(f, map_location="cpu") + + print("Load pre-trained checkpoint from: %s" % load_path) + if "model" in checkpoint.keys(): + checkpoint_model = checkpoint["model"] + else: + checkpoint_model = checkpoint["model_state"] + # interpolate position embedding + interpolate_pos_embed(model, checkpoint_model) + + checkpoint_model = misc.convert_checkpoint(checkpoint_model) + + # load pre-trained model + msg = model.load_state_dict(checkpoint_model, strict=False) + print(msg) + + inferencer_kwargs = { + "fps": 6.25, + "layer_activation_format": { + "patch_embed": "THWC", + **{f"blocks.{i}": "THWC" for i in range(0, num_blocks, LAYER_SELECT_STEP)}, + # "head": "THWC" # weight not available + }, + "num_frames": num_frames, + } + + def process_activation(layer, layer_name, inputs, output): + B = 
output.shape[0] + C = output.shape[-1] + if layer_name.startswith("blocks"): + output = output[:, 1:] # remove cls token + output = output.reshape(B, -1, feature_map_size, feature_map_size, C) + return output + + wrapper = PytorchWrapper(identifier, model, transform_video, + process_output=process_activation, + **inferencer_kwargs) + + return wrapper diff --git a/brainscore_vision/models/temporal_model_mae_st/requirements.txt b/brainscore_vision/models/temporal_model_mae_st/requirements.txt new file mode 100644 index 000000000..0d1858c8e --- /dev/null +++ b/brainscore_vision/models/temporal_model_mae_st/requirements.txt @@ -0,0 +1,3 @@ +mae_st @ git+https://github.com/YingtianDt/mae_st.git +torch +torchvision \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_mae_st/test.py b/brainscore_vision/models/temporal_model_mae_st/test.py new file mode 100644 index 000000000..0f2b2cb56 --- /dev/null +++ b/brainscore_vision/models/temporal_model_mae_st/test.py @@ -0,0 +1,16 @@ +import pytest + +from brainscore_vision import load_model + + +model_list = [ + "MAE-ST-L", + "MAE-ST-G", +] + +@pytest.mark.private_access +@pytest.mark.memory_intense +@pytest.mark.parametrize("model_identifier", model_list) +def test_load(model_identifier): + model = load_model(model_identifier) + assert model is not None \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_mmaction2/__init__.py b/brainscore_vision/models/temporal_model_mmaction2/__init__.py new file mode 100644 index 000000000..685183ff9 --- /dev/null +++ b/brainscore_vision/models/temporal_model_mmaction2/__init__.py @@ -0,0 +1,23 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from brainscore_vision.model_helpers.activations.temporal.utils import get_specified_layers +from brainscore_vision.model_interface import BrainModel +from . 
import model + + +def commit_model(identifier): + activations_model=model.get_model(identifier) + layers=get_specified_layers(activations_model) + return ModelCommitment(identifier=identifier, activations_model=activations_model, layers=layers) + + +model_registry["I3D"] = lambda: commit_model("I3D") +model_registry["I3D-nonlocal"] = lambda: commit_model("I3D-nonlocal") +model_registry["SlowFast"] = lambda: commit_model("SlowFast") +model_registry["X3D"] = lambda: commit_model("X3D") +model_registry["TimeSformer"] = lambda: commit_model("TimeSformer") +model_registry["VideoSwin-B"] = lambda: commit_model("VideoSwin-B") +model_registry["VideoSwin-L"] = lambda: commit_model("VideoSwin-L") +model_registry["UniFormer-V1"] = lambda: commit_model("UniFormer-V1") +model_registry["UniFormer-V2-B"] = lambda: commit_model("UniFormer-V2-B") +model_registry["UniFormer-V2-L"] = lambda: commit_model("UniFormer-V2-L") diff --git a/brainscore_vision/models/temporal_model_mmaction2/mmaction2.csv b/brainscore_vision/models/temporal_model_mmaction2/mmaction2.csv new file mode 100644 index 000000000..9f253b57f --- /dev/null +++ b/brainscore_vision/models/temporal_model_mmaction2/mmaction2.csv @@ -0,0 +1,24 @@ +name,config,checkpoint,Kinetics400-top1,Kinetics400-top5,FLOPs,params,note +I3D,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb_20220812-e213c223.pth,73.47,91.27,43.5G,28.0M, +I3D-nonlocal,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/i3d/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb_20220812-8e1f2148.pth,,,,, +TSM,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb_20220831-64d69186.pth,75.12,91.55,65.75G,23.87M, +SlowFast,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/slowfast/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb_20220818-1cb6dfc8.pth,,,,, +X3D,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/x3d/x3d_m_16x5x1_facebook-kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/x3d/facebook/x3d_m_16x5x1_facebook-kinetics400-rgb_20201027-3f42382a.pth,,,,, +TimeSformer,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/timesformer/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb_20220815-a4d0d01f.pth,,,,, 
+VideoSwin-B,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-182ec6cc.pth,,,,, +VideoSwin-L,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-78ad8b11.pth,,,,, +UniFormer-V1,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/uniformer/uniformer-base_imagenet1k-pre_32x4x1_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv1/uniformer-base_imagenet1k-pre_32x4x1_kinetics400-rgb_20221219-b776322c.pth,,,,, +UniFormer-V2-B,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics400-rgb/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics400-rgb_20230313-75be0806.pth,,,,, +UniFormer-V2-L,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/uniformerv2/uniformerv2-large-p14-res224_clip-kinetics710-pre_u32_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/kinetics400/uniformerv2-large-p14-res224_clip-kinetics710-pre_u32_kinetics400-rgb_20221219-56a46f64.pth,,,,, +VideoMAE-V2,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/videomaev2/vit-base-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400.py,https://download.openmmlab.com/mmaction/v1.0/recognition/videomaev2/vit-base-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400/vit-base-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_20230510-3e7f93b2.pth,86.6,97.3,180G,87M, +VideoMAE-V1,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/videomae/vit-base-p16_videomae-k400-pre_16x4x1_kinetics-400.py,https://download.openmmlab.com/mmaction/v1.0/recognition/videomae/vit-base-p16_videomae-k400-pre_16x4x1_kinetics-400_20221013-860a3cd3.pth,81.3,95.0,180G,87M, +R2plus1D,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/r2plus1d/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/r2plus1d/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb_20220812-4270588c.pth,75.46,92.28,213G,63.8M, +I3D-nonlocal,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/i3d/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb_20220812-8e1f2148.pth,74.80,92.07,59.3G,35.4M, 
+TSN,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb_20220906-2692d16c.pth,74.12,91.34,102.7G,24.33M, +C3D,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb_20220811-31723200.pth,,,38.5G,78.4M, +UniFormer-V2,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/uniformerv2/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics400-rgb/uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics400-rgb_20230313-75be0806.pth,,,0.1T,115M, +VideoSwin,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-182ec6cc.pth,80.57,94.49,282G,88.0M, +C2D,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb_20221027-e0227b22.pth,73.44,91.00,33G,24.3M, +TSM-nonlocal,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb_20220831-108bfde5.pth,74.49,91.15,61.30G,31.68M, +CSN,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/csn/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb_20220811-44395bae.pth,79.44,94.26,55.90G,13.13M, +TPN,https://github.com/open-mmlab/mmaction2/blob/main/configs/recognition/tpn/tpn-slowonly_r50_8xb8-8x8x1-150e_kinetics400-rgb.py,https://download.openmmlab.com/mmaction/v1.0/recognition/tpn/tpn-slowonly_r50_8xb8-8x8x1-150e_kinetics400-rgb/tpn-slowonly_r50_8xb8-8x8x1-150e_kinetics400-rgb_20220913-97d0835d.pth,74.20,91.48,,, \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_mmaction2/model.py b/brainscore_vision/models/temporal_model_mmaction2/model.py new file mode 100644 index 000000000..351158842 --- /dev/null +++ b/brainscore_vision/models/temporal_model_mmaction2/model.py @@ -0,0 +1,226 @@ +import os +import numpy as np + +import mmengine +import mmaction +from mmaction.apis import init_recognizer +from mmengine.registry import init_default_scope +from mmengine.dataset import Compose, 
pseudo_collate + +from brainscore_vision.model_helpers.activations.temporal.model import PytorchWrapper + + +LARGE_MODEL_LAYER_STEP = 2 +HOME = os.path.join(os.path.dirname(mmaction.__file__), "models") + + +class MMActionModelWrapper(PytorchWrapper): + meta = None + + def load_meta(self, path=os.path.join(os.path.dirname(__file__), "mmaction2.csv")): + if self.meta is None: + import pandas as pd + self.meta = pd.read_csv(path) + + def __init__(self, model_name, process_output=None, *args, **kwargs): + self.load_meta() + + _num_frames = None + num_frames = kwargs.get("num_frames") + if isinstance(num_frames, (list, tuple)): + if num_frames[0] == num_frames[1]: + _num_frames = num_frames + elif num_frames is not None: + _num_frames = num_frames + + model_data = self.meta[self.meta['name'] == model_name].iloc[0] # return a Series + config = model_data['config'] + checkpoint = model_data['checkpoint'] + config = config.replace("https://github.com/open-mmlab/mmaction2/blob/main/", "") + config_path = os.path.join(HOME, config) + config = mmengine.Config.fromfile(config_path) + + test_pipeline_cfg = config.test_pipeline + # SampleFrames: clip_len x frame_interval (sampling interval) x num_clips + # change every ThreeCrop and TenCrop to CenterCrop + for i, pipeline in enumerate(test_pipeline_cfg): + if pipeline['type'] in ['ThreeCrop', 'TenCrop']: + test_pipeline_cfg[i] = {'type': 'CenterCrop', 'crop_size': pipeline['crop_size']} + if pipeline['type'] in ['SampleFrames']: + test_pipeline_cfg[i].update({"num_clips": 1, 'frame_interval': 1}) + + model = init_recognizer(config, checkpoint, device="cpu") + init_default_scope(model.cfg.get('default_scope', 'mmaction')) + test_pipeline = Compose(test_pipeline_cfg[3:]) + + def transform_video(video): + imgs = video.to_numpy() + data = {'imgs': imgs, 'num_clips': 1, 'modality': 'RGB'} + if _num_frames is not None: + data['clip_len'] = _num_frames + assert len(imgs) == _num_frames + else: + data['clip_len'] = len(imgs) + + data = test_pipeline(data) + return data + + super().__init__(model_name, model, transform_video, process_output, *args, **kwargs) + + def forward(self, inputs): + data = pseudo_collate(inputs) + data["inputs"] = [d.to(self._device) for d in data["inputs"]] + result = self._model.test_step(data)[0] + return result + + +def get_model(identifier): + if identifier == "I3D": + process_output = None + inferencer_kwargs = { + "fps": 12.5, + "layer_activation_format": { + "backbone.conv1": "CTHW", # too large: (C: 64, T: *, H: 128, W: 128) + **{f"backbone.layer{i}": "CTHW" for i in range(1, 5)}, + "cls_head": "C", + }, + "num_frames": (5, np.inf), + } + + if identifier == "I3D-nonlocal": + process_output = None + inferencer_kwargs = { + "fps": 12.5, + "layer_activation_format": { + "backbone.conv1": "CTHW", # too large: (C: 64, T: *, H: 128, W: 128) + **{f"backbone.layer{i}": "CTHW" for i in range(1, 5)}, + "cls_head": "C", + }, + "num_frames": (5, np.inf), + } + + if identifier == "TSM": + process_output = None + inferencer_kwargs = { + "fps": 25, + "layer_activation_format": {}, + } + + if identifier == "SlowFast": + process_output = None + inferencer_kwargs = { + "fps": 12.5, + "layer_activation_format": { + "backbone.slow_path.conv1_lateral": "CTHW", + **{f"backbone.slow_path.layer{i}_lateral": "CTHW" for i in range(1, 4)}, + "cls_head": "C", + }, + "num_frames": 32, # TODO: in fact can be multiple of 4? 
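+            # num_frames above fixes the clip length the inferencer samples for SlowFast;
+            # per the TODO, other multiples of 4 might also work, but only 32 is used here.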
+ } + + if identifier == "X3D": + process_output = None + inferencer_kwargs = { + "fps": 30, + "layer_activation_format": { + "backbone.conv1_t": "CTHW", + **{f"backbone.layer{i}": "CTHW" for i in range(1, 5)}, + "cls_head": "C", + }, + } + + if identifier == "TimeSformer": + inferencer_kwargs = { + "fps": 8, + "layer_activation_format": { + "backbone.patch_embed": "CTHW", + **{f"backbone.transformer_layers.layers.{i}": "HWTC" for i in range(0, 12, LARGE_MODEL_LAYER_STEP)}, + "cls_head": "C", + }, + "num_frames": 8 + } + def process_output(layer, layer_name, inputs, output): + if layer_name == "backbone.patch_embed": + B = inputs[0].shape[0] + C = output.shape[-1] + output = output.reshape(B, -1, 14, 14, C) + if layer_name.startswith("backbone.transformer_layers.layers."): + output = output[:, 1:] + B = output.shape[0] + C = output.shape[-1] + output = output.reshape(B, 14, 14, -1, C) + return output + + if identifier in ["VideoSwin-B", "VideoSwin-L"]: + + transformer_layers = { + **{f"backbone.layers.0.blocks.{i}": "THWC" for i in range(0, 2, LARGE_MODEL_LAYER_STEP)}, + **{f"backbone.layers.1.blocks.{i}": "THWC" for i in range(0, 2, LARGE_MODEL_LAYER_STEP)}, + **{f"backbone.layers.2.blocks.{i}": "THWC" for i in range(0, 18, LARGE_MODEL_LAYER_STEP)}, + **{f"backbone.layers.3.blocks.{i}": "THWC" for i in range(0, 2, LARGE_MODEL_LAYER_STEP)}, + } + + inferencer_kwargs = { + "fps": 12.5, + "layer_activation_format": { + "backbone.patch_embed": "CTHW", + **transformer_layers, + "cls_head": "C", + }, + } + process_output = None + + if identifier == "UniFormer-V1": + + transformer_layers = { + **{f"backbone.blocks1.{i}": "CTHW" for i in range(0, 5, LARGE_MODEL_LAYER_STEP)}, + **{f"backbone.blocks2.{i}": "CTHW" for i in range(0, 8, LARGE_MODEL_LAYER_STEP)}, + **{f"backbone.blocks3.{i}": "CTHW" for i in range(0, 20, LARGE_MODEL_LAYER_STEP)}, + **{f"backbone.blocks4.{i}": "CTHW" for i in range(0, 7, LARGE_MODEL_LAYER_STEP)}, + } + + inferencer_kwargs = { + "fps": 6.25, + "layer_activation_format": { + "backbone.pos_drop": "CTHW", + **transformer_layers, + "cls_head": "C", + }, + } + process_output = None + + if identifier.startswith("UniFormer-V2"): + + if identifier == "UniFormer-V2-B": + num_frames = 8 + num_transformer_layers = 12 + img_size = 14 + elif identifier == "UniFormer-V2-L": + num_frames = 32 + num_transformer_layers = 24 + img_size = 16 + + transformer_layers = { + **{f"backbone.transformer.resblocks.{i}": "HWTC" for i in range(0, num_transformer_layers, LARGE_MODEL_LAYER_STEP)}, + } + + inferencer_kwargs = { + "fps": 25, + "layer_activation_format": { + "backbone.conv1": "CTHW", + **transformer_layers, + "backbone": "C", + "cls_head": "C", + }, + "num_frames": num_frames + } + def process_output(layer, layer_name, inputs, output): + if layer_name.startswith("backbone.transformer.resblocks."): + T = inputs[1] + C = output.shape[-1] + output = output[1:] # remove the class token + output = output.reshape(img_size, img_size, -1, T, C).permute(2, 0, 1, 3, 4) # BHWTC + return output + + model = MMActionModelWrapper(identifier, process_output, **inferencer_kwargs) + return model \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_mmaction2/requirements.txt b/brainscore_vision/models/temporal_model_mmaction2/requirements.txt new file mode 100644 index 000000000..1caa728dd --- /dev/null +++ b/brainscore_vision/models/temporal_model_mmaction2/requirements.txt @@ -0,0 +1,5 @@ +importlib-metadata<5 +mmaction2 @ git+https://github.com/YingtianDt/mmaction2.git@533edc3 
+mmengine +torch +torchvision \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_mmaction2/test.py b/brainscore_vision/models/temporal_model_mmaction2/test.py new file mode 100644 index 000000000..d539c623a --- /dev/null +++ b/brainscore_vision/models/temporal_model_mmaction2/test.py @@ -0,0 +1,24 @@ +import pytest + +from brainscore_vision import load_model + + +model_list = [ + "I3D", + "I3D-nonlocal", + "SlowFast", + "X3D", + "TimeSformer", + "VideoSwin-B", + "VideoSwin-L", + "UniFormer-V1", + "UniFormer-V2-B", + "UniFormer-V2-L", +] + +@pytest.mark.private_access +@pytest.mark.memory_intense +@pytest.mark.parametrize("model_identifier", model_list) +def test_load(model_identifier): + model = load_model(model_identifier) + assert model is not None \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_openstl/__init__.py b/brainscore_vision/models/temporal_model_openstl/__init__.py new file mode 100644 index 000000000..2b49cc845 --- /dev/null +++ b/brainscore_vision/models/temporal_model_openstl/__init__.py @@ -0,0 +1,19 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from brainscore_vision.model_helpers.activations.temporal.utils import get_specified_layers +from brainscore_vision.model_interface import BrainModel +from . import model + + +def commit_model(identifier): + activations_model=model.get_model(identifier) + layers=get_specified_layers(activations_model) + return ModelCommitment(identifier=identifier, activations_model=activations_model, layers=layers) + + +model_registry["ConvLSTM"] = lambda: commit_model("ConvLSTM") +model_registry["PredRNN"] = lambda: commit_model("PredRNN") +# model_registry["PredNet"] = lambda: commit_model("PredNet") +model_registry["SimVP"] = lambda: commit_model("SimVP") +model_registry["TAU"] = lambda: commit_model("TAU") +model_registry["MIM"] = lambda: commit_model("MIM") diff --git a/brainscore_vision/models/temporal_model_openstl/model.py b/brainscore_vision/models/temporal_model_openstl/model.py new file mode 100644 index 000000000..aed3e0464 --- /dev/null +++ b/brainscore_vision/models/temporal_model_openstl/model.py @@ -0,0 +1,223 @@ +import os +import imp +import numpy as np +from collections import OrderedDict + +import torch +from torchvision import transforms +import openstl +from openstl.methods import method_maps +from openstl.utils import reshape_patch + +from brainscore_vision.model_helpers.activations.temporal.model import PytorchWrapper +from brainscore_vision.model_helpers.activations.temporal.utils import download_weight_file + + +# We only use models trained on KITTI dataset, because it is the most ecological, +# diverse, challenging, and widely used dataset for next frame prediction among +# the datasets used by OpenSTL repo. 
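+# Note: every input video is resized to IMAGE_SIZES below (via transform_image) and the
+# wrappers run at KITTI_FPS, so activations are always computed at the resolution and
+# frame rate the KITTI-trained checkpoints expect.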
+IMAGE_SIZES = (128, 160) # for KITTI +KITTI_CONFIG_DIR = os.path.join(os.path.dirname(openstl.__file__), "configs/kitticaltech") +KITTI_FPS = 10 # from paper: https://www.cvlibs.net/publications/Geiger2012CVPR.pdf +LARGE_MODEL_LAYER_STEP = 2 + +transform_image = transforms.Resize(IMAGE_SIZES) + + +class LSTMWrapper(PytorchWrapper): + def _register_hook(self, layer, layer_name, target_dict): + def hook_function(_layer, _input, output, name=layer_name, target_dict=target_dict): + output = self._process_activation(_layer, name, _input, output) + target_dict.setdefault(name, []).append(PytorchWrapper._tensor_to_numpy(output)) + + hook = layer.register_forward_hook(hook_function) + return hook + + def get_activations(self, inputs, layer_names): + self._model.eval() + layer_results = OrderedDict() + hooks = [] + + for layer_name in layer_names: + layer = self.get_layer(layer_name) + hook = self._register_hook(layer, layer_name, target_dict=layer_results) + hooks.append(hook) + + with torch.no_grad(): + self.forward(inputs) + + for hook in hooks: + hook.remove() + + # stack the T dim to be the second dim + for layer_name, activations in layer_results.items(): + layer_results[layer_name] = np.stack(activations, axis=1) + + return layer_results + + def forward(self, inputs): + tensor = torch.stack(inputs) + tensor = tensor.to(self._device) + return self._model(tensor, return_loss=False) + + +class MIMWrapper(LSTMWrapper): + def forward(self, inputs): + output = super().forward(inputs) + # clear MIMBlock.convlstm_c + def _clear_helper(module): + if hasattr(module, "convlstm_c"): + module.convlstm_c = None + for child in module.children(): + _clear_helper(child) + _clear_helper(self._model) + return output + + +def _get_config(name, parent_dir): + config = imp.load_source(name, os.path.join(parent_dir, f"{name}.py")).__dict__ + config = {k: v for k, v in config.items() if not k.startswith("__")} + return config + + +def get_model(identifier): + config = _get_config(identifier, KITTI_CONFIG_DIR) + config["method"] = config["method"].lower() + config['dataname'] = "kitticaltech" + config['dataname'] = "kitticaltech" + config['metrics'] = ['mse', 'mae'] # not in use, just to initialize the model + config['in_shape'] = [None, 3, *IMAGE_SIZES] + + if identifier == "PredRNN": + layer_activation_format = { + **{f"cell_list.{i}": "TCHW" for i in range(4)}, + "conv_last": "TCHW" + } + + def process_output(layer, layer_name, inputs, output): + if layer_name.startswith("cell_list"): + h, c, m = output + return m + else: + return output + + wrapper_cls = LSTMWrapper + kwargs = {} + weight_name = "kitticaltech_predrnn_one_ep100.pth" + + elif identifier == "PredNet": + layer_activation_format = { + **{f"layer{i}": "TCHW" for i in range(4)}, + "layer5": "TCHW" + } + + def process_output(layer, layer_name, inputs, output): + if layer_name.startswith("cell_list"): + h, c = output + return c + else: + return output + + wrapper_cls = LSTMWrapper + kwargs = {} + weight_name = "kitticaltech_prednet_one_ep100.pth" + + elif identifier == "ConvLSTM": + layer_activation_format = { + **{f"cell_list.{i}": "TCHW" for i in range(4)}, + "conv_last": "TCHW" + } + + def process_output(layer, layer_name, inputs, output): + if layer_name.startswith("cell_list"): + h, c = output + return c + else: + return output + + wrapper_cls = LSTMWrapper + kwargs = {} + weight_name = "kitticaltech_convlstm_one_ep100.pth" + + elif identifier in ["SimVP", "TAU"]: + num_frames = 10 + layer_activation_format = { + **{f"enc.enc.{i}": "TCHW" for i in 
range(0, 2, LARGE_MODEL_LAYER_STEP)}, + **{f"hid.enc.{i}": "TCHW" for i in range(0, 6, LARGE_MODEL_LAYER_STEP)}, + **{f"dec.dec.{i}": "TCHW" for i in range(0, 2, LARGE_MODEL_LAYER_STEP)}, + } + + config['in_shape'] = [num_frames, 3, *IMAGE_SIZES] + wrapper_cls = PytorchWrapper + kwargs = {"num_frames": num_frames} + + def process_output(layer, layer_name, inputs, output): + if layer_name.startswith("enc") or layer_name.startswith("dec"): + output = output.view(-1, num_frames, *output.shape[1:]) + elif layer_name.startswith("hid"): + output = output[:, None] # time-compressed layers + return output + if identifier == "SimVP": + weight_name = "kitticaltech_simvp_gsta_one_ep100.pth" + elif identifier == "TAU": + weight_name = "kitticaltech_tau_one_ep100.pth" + + elif identifier == "MIM": + layer_activation_format = { + **{f"stlstm_layer.{i}": "TCHW" for i in range(0, 4, LARGE_MODEL_LAYER_STEP)}, + **{f"stlstm_layer_diff.{i}": "TCHW" for i in range(0, 3, LARGE_MODEL_LAYER_STEP)}, + "conv_last": "TCHW" + } + + def process_output(layer, layer_name, inputs, output): + if layer_name.startswith("stlstm_layer."): + h, c, m = output + ret = m + elif layer_name.startswith("stlstm_layer_diff."): + h, c = output + ret = c + else: + ret = output + return ret + + wrapper_cls = MIMWrapper + kwargs = {} + weight_name = "kitticaltech_mim_one_ep100.pth" + + + model = method_maps[config["method"]](**config).model + url = f"https://github.com/chengtan9907/OpenSTL/releases/download/kitti-weights/{weight_name}" + weight_path = download_weight_file(url, folder="temporal_model_openstl") + model.load_state_dict(torch.load(weight_path, map_location="cpu")) + + def transform_video_lstm(video): + frames = torch.Tensor(video.to_numpy() / 255.0).permute(0, 3, 1, 2) + frames = transform_image(frames) + frames = frames.permute(0, 2, 3, 1)[None, :] # BTHWC + patch_size = config["patch_size"] + assert 5 == frames.ndim + batch_size, seq_length, img_height, img_width, num_channels = frames.shape + a = frames.reshape(batch_size, seq_length, + img_height//patch_size, patch_size, + img_width//patch_size, patch_size, + num_channels) + b = a.transpose(3, 4) + patches = b.reshape(batch_size, seq_length, + img_height//patch_size, + img_width//patch_size, + patch_size*patch_size*num_channels)[0] + return patches + + def transform_video_simvp(video): + frames = torch.Tensor(video.to_numpy() / 255.0).permute(0, 3, 1, 2) + frames = transform_image(frames) + return frames + + if identifier in ("PredRNN", "ConvLSTM", "MIM"): + transform_video = transform_video_lstm + else: + transform_video = transform_video_simvp + + return wrapper_cls(identifier, model, transform_video, fps=KITTI_FPS, + layer_activation_format=layer_activation_format, + process_output=process_output, **kwargs) \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_openstl/requirements.txt b/brainscore_vision/models/temporal_model_openstl/requirements.txt new file mode 100644 index 000000000..4d839c16f --- /dev/null +++ b/brainscore_vision/models/temporal_model_openstl/requirements.txt @@ -0,0 +1,3 @@ +torch +torchvision +openstl @ git+https://github.com/YingtianDt/OpenSTL.git \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_openstl/test.py b/brainscore_vision/models/temporal_model_openstl/test.py new file mode 100644 index 000000000..4d52b76ce --- /dev/null +++ b/brainscore_vision/models/temporal_model_openstl/test.py @@ -0,0 +1,20 @@ +import pytest + +from brainscore_vision import load_model + + +model_list = [ + 
"ConvLSTM", + "PredRNN", + "PredNet", + "SimVP", + "TAU", + "MIM" +] + +@pytest.mark.private_access +@pytest.mark.memory_intense +@pytest.mark.parametrize("model_identifier", model_list) +def test_load(model_identifier): + model = load_model(model_identifier) + assert model is not None \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_torchvision/__init__.py b/brainscore_vision/models/temporal_model_torchvision/__init__.py new file mode 100644 index 000000000..54820f87f --- /dev/null +++ b/brainscore_vision/models/temporal_model_torchvision/__init__.py @@ -0,0 +1,19 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from brainscore_vision.model_helpers.activations.temporal.utils import get_specified_layers +from brainscore_vision.model_interface import BrainModel +from . import model + + +def commit_model(identifier): + activations_model=model.get_model(identifier) + layers=get_specified_layers(activations_model) + return ModelCommitment(identifier=identifier, activations_model=activations_model, layers=layers) + + +model_registry['r3d_18'] = lambda: commit_model('r3d_18') +model_registry['r2plus1d_18'] = lambda: commit_model('r2plus1d_18') +model_registry['mc3_18'] = lambda: commit_model('mc3_18') +model_registry['s3d'] = lambda: commit_model('s3d') +model_registry['mvit_v1_b'] = lambda: commit_model('mvit_v1_b') +model_registry['mvit_v2_s'] = lambda: commit_model('mvit_v2_s') \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_torchvision/model.py b/brainscore_vision/models/temporal_model_torchvision/model.py new file mode 100644 index 000000000..30d96aba8 --- /dev/null +++ b/brainscore_vision/models/temporal_model_torchvision/model.py @@ -0,0 +1,92 @@ +import torch +import numpy as np +from torchvision import transforms +from torchvision.models import video as vid + +from brainscore_vision.model_helpers.activations.temporal.model.pytorch import PytorchWrapper + + +LARGE_MODEL_LAYER_STEP = 2 + +def get_transform_video(transform_img): + def transform_video(video): + frames = video.to_numpy() / 255. 
+ frames = torch.Tensor(frames) + frames = frames.permute(0, 3, 1, 2) + frames = transform_img(frames) + return frames.permute(1, 0, 2, 3) + return transform_video + + +def get_model(identifier): + if identifier in ["r3d_18", "r2plus1d_18", "mc3_18"]: + img_transform = transforms.Compose([ + transforms.Resize((128, 171)), + transforms.CenterCrop(112), + transforms.Normalize(mean=[0.43216, 0.394666, 0.37645], std=[0.22803, 0.22145, 0.216989]) + ]) + inferencer_kwargs = { + "fps": 25, + "layer_activation_format": + { + "stem": "CTHW", + **{f'layer{i}': "CTHW" for i in range(1, 5)}, + "avgpool": "CTHW", + "fc": "C" + }, + } + process_output = None + + elif identifier == "s3d": + img_transform = transforms.Compose([ + transforms.Resize((256, 256)), + transforms.CenterCrop(224), + transforms.Normalize(mean=[0.43216, 0.394666, 0.37645], std=[0.22803, 0.22145, 0.216989]) + ]) + inferencer_kwargs = { + "fps": 15, + "num_frames": (13, np.inf), + "layer_activation_format": + { + **{f"features.{i}": "CTHW" for i in range(0, 16, LARGE_MODEL_LAYER_STEP)}, + "avgpool": "CTHW", + "classifier": "CTHW" + } + } + process_output = None + + elif identifier in ["mvit_v1_b", "mvit_v2_s"]: + img_transform = transforms.Compose([ + transforms.Resize((256, 256)), + transforms.CenterCrop(224), + transforms.Normalize(mean=[0.43216, 0.394666, 0.37645], std=[0.22803, 0.22145, 0.216989]) + ]) + inferencer_kwargs = { + "fps": 7.5, + "num_frames": 16, + "layer_activation_format": { + "conv_proj": "CTHW", + **{f"blocks.{i}": "THWC" for i in range(0, 16, LARGE_MODEL_LAYER_STEP)}, + "head": "C", + } + } + + def process_output(layer, layer_name, input, output): + if layer_name.startswith("blocks"): + output, thw = output + t, h, w = thw + output = output[:, 1:] # remove cls + b, n, c = output.shape + assert n == t*h*w + output = output.view(b, t, h, w, c) + return output + return output + + vid_transform = get_transform_video(img_transform) + model_name = identifier + model = getattr(vid, model_name)(weights="KINETICS400_V1") + wrapper = PytorchWrapper(identifier, model, vid_transform, + process_output=process_output, + **inferencer_kwargs) + + return wrapper \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_torchvision/requirements.txt b/brainscore_vision/models/temporal_model_torchvision/requirements.txt new file mode 100644 index 000000000..37f700a78 --- /dev/null +++ b/brainscore_vision/models/temporal_model_torchvision/requirements.txt @@ -0,0 +1,2 @@ +torch +torchvision \ No newline at end of file diff --git a/brainscore_vision/models/temporal_model_torchvision/test.py b/brainscore_vision/models/temporal_model_torchvision/test.py new file mode 100644 index 000000000..77486ad03 --- /dev/null +++ b/brainscore_vision/models/temporal_model_torchvision/test.py @@ -0,0 +1,20 @@ +import pytest + +from brainscore_vision import load_model + + +model_list = [ + "r3d_18", + "r2plus1d_18", + "mc3_18", + "s3d", + "mvit_v1_b", + "mvit_v2_s", +] + +@pytest.mark.private_access +@pytest.mark.memory_intense +@pytest.mark.parametrize("model_identifier", model_list) +def test_load(model_identifier): + model = load_model(model_identifier) + assert model is not None \ No newline at end of file diff --git a/tests/test_model_helpers/temporal/activations/test_inputs.py b/tests/test_model_helpers/temporal/activations/test_inputs.py index a49504f8a..fb2cb7843 100644 --- a/tests/test_model_helpers/temporal/activations/test_inputs.py +++ b/tests/test_model_helpers/temporal/activations/test_inputs.py @@ -40,6 +40,7 @@ 
def test_video(): video3 = video1.set_window(-10, 0, padding="repeat") video4 = video1.set_window(-20, -10, padding="repeat") assert (video3.to_numpy() == video4.to_numpy()).all() + assert (video3.to_numpy()[0] == video1.to_numpy()[0]).all() assert video2.fps == 30 assert video2.set_fps(1).to_numpy().shape[0] == 1 From 79cdf6841bf0180c07446a810728ce6655dc4300 Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Wed, 26 Jun 2024 09:17:20 -0400 Subject: [PATCH 11/68] Add resnext101_32x32d_wsl (#962) Co-authored-by: Ethan Pellegrini --- .../models/resnext101_32x32d_wsl/__init__.py | 7 ++++ .../models/resnext101_32x32d_wsl/model.py | 40 +++++++++++++++++++ .../resnext101_32x32d_wsl/requirements.txt | 2 + .../models/resnext101_32x32d_wsl/test.py | 8 ++++ 4 files changed, 57 insertions(+) create mode 100644 brainscore_vision/models/resnext101_32x32d_wsl/__init__.py create mode 100644 brainscore_vision/models/resnext101_32x32d_wsl/model.py create mode 100644 brainscore_vision/models/resnext101_32x32d_wsl/requirements.txt create mode 100644 brainscore_vision/models/resnext101_32x32d_wsl/test.py diff --git a/brainscore_vision/models/resnext101_32x32d_wsl/__init__.py b/brainscore_vision/models/resnext101_32x32d_wsl/__init__.py new file mode 100644 index 000000000..dd3446941 --- /dev/null +++ b/brainscore_vision/models/resnext101_32x32d_wsl/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['resnext101_32x32d_wsl'] = lambda: ModelCommitment(identifier='resnext101_32x32d_wsl', + activations_model=get_model('resnext101_32x32d_wsl'), + layers=get_layers('resnext101_32x32d_wsl')) \ No newline at end of file diff --git a/brainscore_vision/models/resnext101_32x32d_wsl/model.py b/brainscore_vision/models/resnext101_32x32d_wsl/model.py new file mode 100644 index 000000000..51e22396e --- /dev/null +++ b/brainscore_vision/models/resnext101_32x32d_wsl/model.py @@ -0,0 +1,40 @@ +import functools +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from brainscore_vision.model_helpers.check_submission import check_models +import torch.hub +import ssl + + +ssl._create_default_https_context = ssl._create_unverified_context + +def get_model(name): + assert name == 'resnext101_32x32d_wsl' + model_identifier = "resnext101_32x32d_wsl" + model = torch.hub.load('facebookresearch/WSL-Images', model_identifier) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + batch_size = {8: 32, 16: 16, 32: 8, 48: 4} + wrapper = PytorchWrapper(identifier=model_identifier, model=model, preprocessing=preprocessing, + batch_size=batch_size[32]) + wrapper.image_size = 224 + return wrapper + + +def get_layers(name): + assert name == 'resnext101_32x32d_wsl' + return (['conv1'] + + # note that while relu is used multiple times, by default the last one will overwrite all previous ones + [f"layer{block + 1}.{unit}.relu" + for block, block_units in enumerate([3, 4, 23, 3]) for unit in range(block_units)] + + ['avgpool']) + + +def get_bibtex(model_identifier): + """ + A method returning the bibtex reference of the requested model as a string. 
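+    (Currently returns an empty string.)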
+ """ + return """""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) \ No newline at end of file diff --git a/brainscore_vision/models/resnext101_32x32d_wsl/requirements.txt b/brainscore_vision/models/resnext101_32x32d_wsl/requirements.txt new file mode 100644 index 000000000..a56666d38 --- /dev/null +++ b/brainscore_vision/models/resnext101_32x32d_wsl/requirements.txt @@ -0,0 +1,2 @@ +torchvision +torch \ No newline at end of file diff --git a/brainscore_vision/models/resnext101_32x32d_wsl/test.py b/brainscore_vision/models/resnext101_32x32d_wsl/test.py new file mode 100644 index 000000000..9e0d951ba --- /dev/null +++ b/brainscore_vision/models/resnext101_32x32d_wsl/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('resnext101_32x32d_wsl') + assert model.identifier == 'resnext101_32x32d_wsl' \ No newline at end of file From d88db5ab57ae747a0025cab12c42a5de3a1af965 Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Fri, 28 Jun 2024 13:24:49 -0400 Subject: [PATCH 12/68] Ep/add resnext101 32x48d wsl (#961) * Add resnext101_32x48d_wsl * Remove get_model_list --------- Co-authored-by: Ethan Pellegrini --- .../models/resnext101_32x48d_wsl/__init__.py | 7 ++++ .../models/resnext101_32x48d_wsl/model.py | 38 +++++++++++++++++++ .../resnext101_32x48d_wsl/requirements.txt | 3 ++ .../models/resnext101_32x48d_wsl/test.py | 8 ++++ 4 files changed, 56 insertions(+) create mode 100644 brainscore_vision/models/resnext101_32x48d_wsl/__init__.py create mode 100644 brainscore_vision/models/resnext101_32x48d_wsl/model.py create mode 100644 brainscore_vision/models/resnext101_32x48d_wsl/requirements.txt create mode 100644 brainscore_vision/models/resnext101_32x48d_wsl/test.py diff --git a/brainscore_vision/models/resnext101_32x48d_wsl/__init__.py b/brainscore_vision/models/resnext101_32x48d_wsl/__init__.py new file mode 100644 index 000000000..e566d9ea2 --- /dev/null +++ b/brainscore_vision/models/resnext101_32x48d_wsl/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['resnext101_32x48d_wsl'] = lambda: ModelCommitment(identifier='resnext101_32x48d_wsl', + activations_model=get_model('resnext101_32x48d_wsl'), + layers=get_layers('resnext101_32x48d_wsl')) \ No newline at end of file diff --git a/brainscore_vision/models/resnext101_32x48d_wsl/model.py b/brainscore_vision/models/resnext101_32x48d_wsl/model.py new file mode 100644 index 000000000..b3e08272b --- /dev/null +++ b/brainscore_vision/models/resnext101_32x48d_wsl/model.py @@ -0,0 +1,38 @@ +import functools +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +import torch.hub +import ssl +from brainscore_vision.model_helpers.check_submission import check_models + + +ssl._create_default_https_context = ssl._create_unverified_context + + +def get_model(name): + assert name == 'resnext101_32x48d_wsl' + model_identifier = "resnext101_32x48d_wsl" + model = torch.hub.load('facebookresearch/WSL-Images', model_identifier) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + batch_size = {8: 32, 16: 16, 32: 8, 48: 4} + wrapper = PytorchWrapper(identifier=model_identifier, 
model=model, preprocessing=preprocessing, + batch_size=batch_size[48]) + wrapper.image_size = 224 + return wrapper + + +def get_layers(name): + assert name == 'resnext101_32x48d_wsl' + return (['conv1'] + + # note that while relu is used multiple times, by default the last one will overwrite all previous ones + [f"layer{block + 1}.{unit}.relu" + for block, block_units in enumerate([3, 4, 23, 3]) for unit in range(block_units)] + + ['avgpool']) + + +def get_bibtex(model_identifier): + return """x""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/resnext101_32x48d_wsl/requirements.txt b/brainscore_vision/models/resnext101_32x48d_wsl/requirements.txt new file mode 100644 index 000000000..5cad17cdd --- /dev/null +++ b/brainscore_vision/models/resnext101_32x48d_wsl/requirements.txt @@ -0,0 +1,3 @@ +torchvision +torch +torch \ No newline at end of file diff --git a/brainscore_vision/models/resnext101_32x48d_wsl/test.py b/brainscore_vision/models/resnext101_32x48d_wsl/test.py new file mode 100644 index 000000000..8e223f69f --- /dev/null +++ b/brainscore_vision/models/resnext101_32x48d_wsl/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('resnext101_32x48d_wsl') + assert model.identifier == 'resnext101_32x48d_wsl' \ No newline at end of file From 0c668d28f8f0db19d7ed2dfd1a9764bf3c9b90d7 Mon Sep 17 00:00:00 2001 From: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Date: Fri, 28 Jun 2024 13:58:54 -0400 Subject: [PATCH 13/68] removing functools, dependency included in python (#903) --- brainscore_vision/models/resnext101_32x8d_wsl/requirements.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/brainscore_vision/models/resnext101_32x8d_wsl/requirements.txt b/brainscore_vision/models/resnext101_32x8d_wsl/requirements.txt index 8ce28e204..a56666d38 100644 --- a/brainscore_vision/models/resnext101_32x8d_wsl/requirements.txt +++ b/brainscore_vision/models/resnext101_32x8d_wsl/requirements.txt @@ -1,3 +1,2 @@ torchvision -torch -functools \ No newline at end of file +torch \ No newline at end of file From 48a6391fc8616d329d85ac42d51f7a90b8d95e57 Mon Sep 17 00:00:00 2001 From: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Date: Fri, 28 Jun 2024 14:52:19 -0400 Subject: [PATCH 14/68] Pinning python subversion in travis to avoid conda failure (#973) * Pinning python subversion to 3.7.13 in travis to avoid conda failure * Overriding Jenkins as code passes in travis, only changes .travis.yaml --- .travis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index d3b99c2d6..69e9e9b03 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,17 +32,17 @@ import: jobs: include: - name: 3.7 public - python: '3.7' + python: '3.7.13' - name: 3.7 private if: fork = false - python: '3.7' + python: '3.7.13' env: - PRIVATE_ACCESS=1 - secure: 
f1rWEwrslh7qa2g/QlKs001sGC3uaOxZNQSfNOPj+TMCqEo2c6OzImC4hyz+WqCyc6N/lFT4yYo2RhvaqStHMRmu/+9aZmuH05Bb0KQpfzNFA+yGa/U5WR3/4u6KRvDAeNEi9drT2LuacTyGbldmQsquujK0jrPpFWpe7zUUKv0zb0lJf0zcjeSrZlDXLlgD6DCqow7OqHRvW04dPZVy1OArRwtPV6DJ6Rqo1MqFQGHJ806VPlXhSoydb7a58dhGajqPjomdmZjhd3wS6Lv6uetTE/VVb4EP4e7n0qfZIx/TpnWG0SR44pcP7OCNARWYANsAivzxnQ0shyXnIzOo8ZcPYiPpt/5D53i5idTBxXyuDaHGQvgwuY5XLZzznEedBgZa4OvjxAXlLEQjdVDfSsZeYaV9gyFkeTlLnK1zvWi0US38eF2Qtm3Sx3D/5TtBKK2n38tyK5gg/XvJNycaXvIl7iVcnI2ifpqD1mUWI6C9j9Tk19/XEpWkwaFi91+0LZF1GhjBu8o3G5Np4RIOKXi3TIHkpbMM5mf11T6Bm9LvEMq1h8bgRQigEbeJF8CbUOSVFv+AaXsggGjQhuwdyvy2JZo+tO1nfhi+kW3XrDGPsz1R7Wfqduyn7UUh5OiFymeZwKseYKnwU47KyCqDwrq5Mnx1MlSidnVmPriadR4= - secure: WE7FPwy07VzJTKAd2xwZdBhtmh8jk7ojwk4B2rIcBQu0vwUXc1MgO8tBLD7s08lBedBjqZiLZEW31uPMEyWNysouDt16a5gm2d149LR7flI3MOifBtxINfJuC3eOEG65bPgN/bYEsIpLKnu3469d5nxZkK7xsjbWTxHGoUpLvVPsmHY2ZM5/jftybs7fI0do4NMG2XffKfZbiFb447Ao3xeQeEfW6IkJllzgGnlG9FJATFidrbwDNdmzAnvPEnDoKAf7ZvhPV0x9yR5V6P4Ck5hxl8mlPdBa1cRMO8s/1ag1c7YJ3AF9ZlwcwqTiGsT8DHTVRxSz4nFHJTMlrm9j84u7WzLZJBhPgF0UeLN3AQgiAZ3c2TFDvjQWeHVuSPkV5GrKlfhSvR82s9yPEdHQxxwYymBbAr6rJR4NtXTyZX0vg8NRKHssZKLSafs/D/pt9xXspqu8HAHc+mS0lCips79XptSr5BEsioil3D2io3tbzrGugpTeJ7oEA787vKn2Cm4XmhyQ0UBhvwsPZ351l27wZYuNV07o9Ik83hN/w4o2v899QQ/zbX42Iy8ZUCWOPX7MV7+TA7SMxru3qx7HL5hDM8kTetxbLB6Ckr+JOdX8L2Fb5L3TVDpsvfv0ebXgwaQR/ez8/7bcXmBqcERApHDz73HaMXUap+iDR4FLdXE= - AWS_DEFAULT_REGION=us-east-1 - stage: "Automerge check" - python: '3.7' + python: '3.7.13' install: - pip install --no-cache-dir torch torchvision --default-timeout=1000 --retries=5 - pip install --no-cache-dir -e ".[test]" From 6c09afb8b21ee73474133c534eed57a20f6245ff Mon Sep 17 00:00:00 2001 From: Katherine Fairchild Date: Fri, 28 Jun 2024 20:51:12 -0400 Subject: [PATCH 15/68] add eBarlow_Vanilla_1_full to models (#802) Co-authored-by: AutoJenkins Co-authored-by: Sam Winebrake <85908068+samwinebrake@users.noreply.github.com> Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> --- .../models/eBarlow_Vanilla_1_full/__init__.py | 9 ++ .../models/eBarlow_Vanilla_1_full/model.py | 84 +++++++++++++++++++ .../models/eBarlow_Vanilla_1_full/setup.py | 25 ++++++ .../models/eBarlow_Vanilla_1_full/test.py | 1 + 4 files changed, 119 insertions(+) create mode 100644 brainscore_vision/models/eBarlow_Vanilla_1_full/__init__.py create mode 100644 brainscore_vision/models/eBarlow_Vanilla_1_full/model.py create mode 100644 brainscore_vision/models/eBarlow_Vanilla_1_full/setup.py create mode 100644 brainscore_vision/models/eBarlow_Vanilla_1_full/test.py diff --git a/brainscore_vision/models/eBarlow_Vanilla_1_full/__init__.py b/brainscore_vision/models/eBarlow_Vanilla_1_full/__init__.py new file mode 100644 index 000000000..3fb240524 --- /dev/null +++ b/brainscore_vision/models/eBarlow_Vanilla_1_full/__init__.py @@ -0,0 +1,9 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry["eBarlow_Vanilla_1_full"] = lambda: ModelCommitment( + identifier="eBarlow_Vanilla_1_full", + activations_model=get_model("eBarlow_Vanilla_1_full"), + layers=get_layers("eBarlow_Vanilla_1_full"), +) diff --git a/brainscore_vision/models/eBarlow_Vanilla_1_full/model.py b/brainscore_vision/models/eBarlow_Vanilla_1_full/model.py new file mode 100644 index 000000000..f984da8da --- /dev/null +++ b/brainscore_vision/models/eBarlow_Vanilla_1_full/model.py @@ -0,0 +1,84 @@ +from brainscore_vision.model_helpers.check_submission import 
check_models +import functools +import os +from urllib.request import urlretrieve +import torchvision.models +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from pathlib import Path +from brainscore_vision.model_helpers import download_weights +import torch +from collections import OrderedDict + +# This is an example implementation for submitting resnet-50 as a pytorch model + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. +# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. +from brainscore_vision.model_helpers.check_submission import check_models + + +def get_model_list(): + return ["eBarlow_Vanilla_1_full"] + + +def get_model(name): + assert name == "eBarlow_Vanilla_1_full" + url = "https://users.flatironinstitute.org/~tyerxa/equi_proj/training_checkpoints/fresh/vanilla/Barlow_1/latest-rank0" + fh = urlretrieve(url) + state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"] + model = load_composer_classifier(state_dict) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + +def load_composer_classifier(sd): + model = torchvision.models.resnet.resnet50() + new_sd = OrderedDict() + for k, v in sd.items(): + if 'lin_cls' in k: + new_sd['fc.' + k.split('.')[-1]] = v + if ".f." not in k: + continue + parts = k.split(".") + idx = parts.index("f") + new_k = ".".join(parts[idx + 1 :]) + new_sd[new_k] = v + model.load_state_dict(new_sd, strict=True) + return model + +def get_layers(name): + assert name == "eBarlow_Vanilla_1_full" + layers = [ + "layer1.0", + "layer1.1", + "layer1.2", + "layer2.0", + "layer2.1", + "layer2.2", + "layer2.3", + "layer3.0", + "layer3.1", + "layer3.2", + "layer3.3", + "layer3.4", + "layer3.5", + "layer4.0", + "layer4.1", + "layer4.2", + "avgpool", + "fc", + ] + outs = ["conv1", "layer1", "layer2", "layer3", "layer4"] + + return layers + outs + + +def get_bibtex(model_identifier): + return """xx""" + + +if __name__ == "__main__": + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/eBarlow_Vanilla_1_full/setup.py b/brainscore_vision/models/eBarlow_Vanilla_1_full/setup.py new file mode 100644 index 000000000..421914cfb --- /dev/null +++ b/brainscore_vision/models/eBarlow_Vanilla_1_full/setup.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup, find_packages + +requirements = [ "torchvision", + "torch" +] + +setup( + packages=find_packages(exclude=['tests']), + include_package_data=True, + install_requires=requirements, + license="MIT license", + zip_safe=False, + keywords='brain-score template', + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Programming Language :: Python :: 3.7', + ], + test_suite='tests', +) diff --git a/brainscore_vision/models/eBarlow_Vanilla_1_full/test.py b/brainscore_vision/models/eBarlow_Vanilla_1_full/test.py new file mode 100644 index 000000000..e594ba9e1 --- /dev/null +++ 
b/brainscore_vision/models/eBarlow_Vanilla_1_full/test.py @@ -0,0 +1 @@ +# Left empty as part of 2023 models migration From b78e9562f65cf2b153d56eb47af4a2d69fb671cc Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Mon, 1 Jul 2024 10:09:33 -0400 Subject: [PATCH 16/68] Add hmax model (#994) Co-authored-by: Ethan Pellegrini --- brainscore_vision/models/hmax/__init__.py | 7 + brainscore_vision/models/hmax/helpers/hmax.py | 438 ++++++++++++++++++ .../models/hmax/helpers/pytorch.py | 216 +++++++++ brainscore_vision/models/hmax/model.py | 69 +++ .../models/hmax/requirements.txt | 8 + brainscore_vision/models/hmax/test.py | 8 + 6 files changed, 746 insertions(+) create mode 100644 brainscore_vision/models/hmax/__init__.py create mode 100644 brainscore_vision/models/hmax/helpers/hmax.py create mode 100644 brainscore_vision/models/hmax/helpers/pytorch.py create mode 100644 brainscore_vision/models/hmax/model.py create mode 100644 brainscore_vision/models/hmax/requirements.txt create mode 100644 brainscore_vision/models/hmax/test.py diff --git a/brainscore_vision/models/hmax/__init__.py b/brainscore_vision/models/hmax/__init__.py new file mode 100644 index 000000000..ef269fb93 --- /dev/null +++ b/brainscore_vision/models/hmax/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['hmax'] = lambda: ModelCommitment(identifier='hmax', + activations_model=get_model('hmax'), + layers=get_layers('hmax')) \ No newline at end of file diff --git a/brainscore_vision/models/hmax/helpers/hmax.py b/brainscore_vision/models/hmax/helpers/hmax.py new file mode 100644 index 000000000..57f3c250c --- /dev/null +++ b/brainscore_vision/models/hmax/helpers/hmax.py @@ -0,0 +1,438 @@ +# encoding: utf8 +""" +PyTorch implementation of the HMAX model of human vision. For more information +about HMAX, check: + http://maxlab.neuro.georgetown.edu/hmax.html +The S and C units of the HMAX model can almost be mapped directly onto +TorchVision's Conv2d and MaxPool2d layers, where channels are used to store the +filters for different orientations. However, HMAX also implements multiple +scales, which doesn't map nicely onto the existing TorchVision functionality. +Therefore, each scale has its own Conv2d layer, which are executed in parallel. +Here is a schematic overview of the network architecture: +layers consisting of units with increasing scale +S1 S1 S1 S1 S1 S1 S1 S1 S1 S1 S1 S1 S1 S1 S1 S1 + \ / \ / \ / \ / \ / \ / \ / \ / + C1 C1 C1 C1 C1 C1 C1 C1 + \ \ \ | / / / / + ALL-TO-ALL CONNECTIVITY + / / / | \ \ \ \ + S2 S2 S2 S2 S2 S2 S2 S2 + | | | | | | | | + C2 C2 C2 C2 C2 C2 C2 C2 +Author: Marijn van Vliet +References +---------- + .. [1] Riesenhuber, Maximilian, and Tomaso Poggio. “Hierarchical Models of + Object Recognition in Cortex.” Nature Neuroscience 2, no. 11 (1999): + 1019–25. https://doi.org/10.1038/14819. + .. [2] Serre, T, M Kouh, C Cadieu, U Knoblich, Gabriel Kreiman, and T Poggio. + “A Theory of Object Recognition: Computations and Circuits in the + Feedforward Path of the Ventral Stream in Primate Visual Cortex.” + Artificial Intelligence, no. December (2005): 1–130. + https://doi.org/10.1.1.207.9279. + .. [3] Serre, Thomas, Aude Oliva, and Tomaso Poggio. “A Feedforward + Architecture Accounts for Rapid Categorization.” Proceedings of the + National Academy of Sciences 104, no. 15 (April 10, 2007): 6424–29. 
+ https://doi.org/10.1073/pnas.0700622104. + .. [4] Serre, Thomas, and Maximilian Riesenhuber. “Realistic Modeling of + Simple and Complex Cell Tuning in the HMAXModel, and Implications for + Invariant Object Recognition in Cortex.” CBCL Memo, no. 239 (2004). + .. [5] Serre, Thomas, Lior Wolf, Stanley Bileschi, Maximilian Riesenhuber, + and Tomaso Poggio. “Robust Object Recognition with Cortex-like + Mechanisms.” IEEE Trans Pattern Anal Mach Intell 29, no. 3 (2007): + 411–26. https://doi.org/10.1109/TPAMI.2007.56. +""" +import numpy as np +import torch +from scipy.io import loadmat +from torch import nn + + +def gabor_filter(size, wavelength, orientation): + """Create a single gabor filter. + Parameters + ---------- + size : int + The size of the filter, measured in pixels. The filter is square, hence + only a single number (either width or height) needs to be specified. + wavelength : float + The wavelength of the grating in the filter, relative to the half the + size of the filter. For example, a wavelength of 2 will generate a + Gabor filter with a grating that contains exactly one wave. This + determines the "tightness" of the filter. + orientation : float + The orientation of the grating in the filter, in degrees. + Returns + ------- + filt : ndarray, shape (size, size) + The filter weights. + """ + lambda_ = size * 2. / wavelength + sigma = lambda_ * 0.8 + gamma = 0.3 # spatial aspect ratio: 0.23 < gamma < 0.92 + theta = np.deg2rad(orientation + 90) + + # Generate Gabor filter + x, y = np.mgrid[:size, :size] - (size // 2) + rotx = x * np.cos(theta) + y * np.sin(theta) + roty = -x * np.sin(theta) + y * np.cos(theta) + filt = np.exp(-(rotx ** 2 + gamma ** 2 * roty ** 2) / (2 * sigma ** 2)) + filt *= np.cos(2 * np.pi * rotx / lambda_) + filt[np.sqrt(x ** 2 + y ** 2) > (size / 2)] = 0 + + # Normalize the filter + filt = filt - np.mean(filt) + filt = filt / np.sqrt(np.sum(filt ** 2)) + + return filt + + +class S1(nn.Module): + """A layer of S1 units with different orientations but the same scale. + The S1 units are at the bottom of the network. They are exposed to the raw + pixel data of the image. Each S1 unit is a Gabor filter, which detects + edges in a certain orientation. They are implemented as PyTorch Conv2d + modules, where each channel is loaded with a Gabor filter in a specific + orientation. + Parameters + ---------- + size : int + The size of the filters, measured in pixels. The filters are square, + hence only a single number (either width or height) needs to be + specified. + wavelength : float + The wavelength of the grating in the filter, relative to the half the + size of the filter. For example, a wavelength of 2 will generate a + Gabor filter with a grating that contains exactly one wave. This + determines the "tightness" of the filter. + orientations : list of float + The orientations of the Gabor filters, in degrees. + """ + + def __init__(self, size, wavelength, orientations=[90, -45, 0, 45]): + super().__init__() + self.num_orientations = len(orientations) + self.size = size + + # Use PyTorch's Conv2d as a base object. Each "channel" will be an + # orientation. + self.gabor = nn.Conv2d(1, self.num_orientations, size, + padding=size // 2, bias=False) + + # Fill the Conv2d filter weights with Gabor kernels: one for each + # orientation + for channel, orientation in enumerate(orientations): + self.gabor.weight.data[channel, 0] = torch.Tensor( + gabor_filter(size, wavelength, orientation)) + + # A convolution layer filled with ones. 
This is used to normalize the + # result in the forward method. + self.uniform = nn.Conv2d(1, 4, size, padding=size // 2, bias=False) + nn.init.constant_(self.uniform.weight, 1) + + # Since everything is pre-computed, no gradient is required + for p in self.parameters(): + p.requires_grad = False + + def forward(self, img): + """Apply Gabor filters, take absolute value, and normalize.""" + s1_output = torch.abs(self.gabor(img)) + norm = torch.sqrt(self.uniform(img ** 2)) + norm.data[norm == 0] = 1 # To avoid divide by zero + s1_output /= norm + return s1_output + + +class C1(nn.Module): + """A layer of C1 units with different orientations but the same scale. + Each C1 unit pools over the S1 units that are assigned to it. + Parameters + ---------- + size : int + Size of the MaxPool2d operation being performed by this C1 layer. + """ + + def __init__(self, size): + super().__init__() + self.size = size + self.local_pool = nn.MaxPool2d(size, stride=size // 2, + padding=size // 2) + + def forward(self, s1_outputs): + """Max over scales, followed by a MaxPool2d operation.""" + s1_outputs = torch.cat([out.unsqueeze(0) for out in s1_outputs], 0) + + # Pool over all scales + s1_output, _ = torch.max(s1_outputs, dim=0) + + # Pool over local (c1_space x c1_space) neighbourhood + return self.local_pool(s1_output) + + +class S2(nn.Module): + """A layer of S2 units with different orientations but the same scale. + The activation of these units is computed by taking the distance between + the output of the C layer below and a set of predefined patches. This + distance is computed as: + d = sqrt( (w - p)^2 ) + = sqrt( w^2 - 2pw + p^2 ) + Parameters + ---------- + patches : ndarray, shape (n_patches, n_orientations, size, size) + The precomputed patches to lead into the weights of this layer. + activation : 'gaussian' | 'euclidean' + Which activation function to use for the units. In the PNAS paper, a + gaussian curve is used ('guassian', the default), whereas the MATLAB + implementation of The Laboratory for Computational Cognitive + Neuroscience uses the euclidean distance ('euclidean'). + sigma : float + The sharpness of the tuning (sigma in eqn 1 of [1]_). Defaults to 1. + References: + ----------- + .. [1] Serre, Thomas, Aude Oliva, and Tomaso Poggio. “A Feedforward + Architecture Accounts for Rapid Categorization.” Proceedings of the + National Academy of Sciences 104, no. 15 (April 10, 2007): 6424–29. + https://doi.org/10.1073/pnas.0700622104. + """ + + def __init__(self, patches, activation='gaussian', sigma=1): + super().__init__() + self.activation = activation + self.sigma = sigma + + num_patches, num_orientations, size, _ = patches.shape + + # Main convolution layer + self.conv = nn.Conv2d(in_channels=num_orientations, + out_channels=num_orientations * num_patches, + kernel_size=size, + padding=size // 2, + groups=num_orientations, + bias=False) + self.conv.weight.data = torch.Tensor( + patches.transpose(1, 0, 2, 3).reshape(1600, 1, size, size)) + + # A convolution layer filled with ones. 
This is used for the distance + # computation + self.uniform = nn.Conv2d(1, 1, size, padding=size // 2, bias=False) + nn.init.constant_(self.uniform.weight, 1) + + # This is also used for the distance computation + self.patches_sum_sq = nn.Parameter( + torch.Tensor((patches ** 2).sum(axis=(1, 2, 3)))) + + self.num_patches = num_patches + self.num_orientations = num_orientations + self.size = size + + # No gradient required for this layer + for p in self.parameters(): + p.requires_grad = False + + def forward(self, c1_outputs): + max = 58 + min = 22 + s2_outputs = [] + for c1_output in c1_outputs: + conv_output = self.conv(c1_output) + + # Unstack the orientations + conv_output_size = conv_output.shape[3] + conv_output = conv_output.view( + -1, self.num_orientations, self.num_patches, conv_output_size, + conv_output_size) + + # Pool over orientations + conv_output = conv_output.sum(dim=1) + + # Compute distance + c1_sq = self.uniform( + torch.sum(c1_output ** 2, dim=1, keepdim=True)) + dist = c1_sq - 2 * conv_output + dist += self.patches_sum_sq[None, :, None, None] + + # Apply activation function + if self.activation == 'gaussian': + dist = torch.exp(- 1 / (2 * self.sigma ** 2) * dist) + elif self.activation == 'euclidean': + dist[dist < 0] = 0 # Negative values should never occur + torch.sqrt_(dist) + dist = -dist + else: + raise ValueError("activation parameter should be either " + "'gaussian' or 'euclidean'.") + + s2_outputs.append(dist) + # s2_out = np.concatenate([np.atleast_2d(_m) for _m in s2_outputs], 0) + # return torch.Tensor(s2_out) + return s2_outputs + + +class C2(nn.Module): + """A layer of C2 units operating on a layer of S2 units.""" + + def forward(self, s2_outputs): + """Take the maximum value of the underlying S2 units.""" + maxs = [s2.max(dim=3)[0] for s2 in s2_outputs] + maxs = [m.max(dim=2)[0] for m in maxs] + maxs = torch.cat([m[:, None, :] for m in maxs], 1) + return maxs.max(dim=1)[0] + + +class Output(nn.Module): + def forward(self, prev_out): + return prev_out + + +class HMAX(nn.Module): + """The full HMAX model. + Use the `get_all_layers` method to obtain the activations for all layers. + If you are only interested in the final output (=C2 layer), use the model + as any other PyTorch module: + model = HMAX(universal_patch_set) + output = model(img) + Parameters + ---------- + universal_patch_set : str + Filename of the .mat file containing the universal patch set. + s2_act : 'gaussian' | 'euclidean' + The activation function for the S2 units. Defaults to 'gaussian'. + Returns + ------- + c2_output : list of Tensors, shape (batch_size, num_patches) + For each scale, the output of the C2 units. 
+ """ + + def __init__(self, universal_patch_set, s2_act='gaussian'): + super().__init__() + + # S1 layers, consisting of units with increasing size + self.s1_units = [ + S1(size=7, wavelength=4), + S1(size=9, wavelength=3.95), + S1(size=11, wavelength=3.9), + S1(size=13, wavelength=3.85), + S1(size=15, wavelength=3.8), + S1(size=17, wavelength=3.75), + S1(size=19, wavelength=3.7), + S1(size=21, wavelength=3.65), + S1(size=23, wavelength=3.6), + S1(size=25, wavelength=3.55), + S1(size=27, wavelength=3.5), + S1(size=29, wavelength=3.45), + S1(size=31, wavelength=3.4), + S1(size=33, wavelength=3.35), + S1(size=35, wavelength=3.3), + S1(size=37, wavelength=3.25), + ] + + # Explicitly add the S1 units as submodules of the model + for s1 in self.s1_units: + self.add_module('s1_%02d' % s1.size, s1) + + # Each C1 layer pools across two S1 layers + self.c1_units = [ + C1(size=8), + C1(size=10), + C1(size=12), + C1(size=14), + C1(size=16), + C1(size=18), + C1(size=20), + C1(size=22), + ] + + # Explicitly add the C1 units as submodules of the model + for c1 in self.c1_units: + self.add_module('c1_%02d' % c1.size, c1) + + # Read the universal patch set for the S2 layer + m = loadmat(universal_patch_set) + patches = [patch.reshape(shape[[2, 1, 0, 3]]).transpose(3, 0, 2, 1) + for patch, shape in zip(m['patches'][0], m['patchSizes'].T)] + + # One S2 layer for each patch scale, operating on all C1 layers + self.s2_units = [S2(patches=scale_patches, activation=s2_act) + for scale_patches in patches] + + # Explicitly add the S2 units as submodules of the model + for i, s2 in enumerate(self.s2_units): + self.add_module('s2_%d' % i, s2) + + # One C2 layer operating on each scale + self.c2_units = [C2() for s2 in self.s2_units] + # self.outs = {} + for name in ['s1_out', 's2_out', 'c1_out', 'c2_out']: + out = Output() + # self.outs[name] = out + self.add_module(name, out) + + # Explicitly add the C2 units as submodules of the model + for i, c2 in enumerate(self.c2_units): + self.add_module('c2_%d' % i, c2) + + def run_all_layers(self, img): + """Compute the activation for each layer. + Parameters + ---------- + img : Tensor, shape (batch_size, 1, height, width) + A batch of images to run through the model + Returns + ------- + s1_outputs : List of Tensors, shape (batch_size, num_orientations, height, width) + For each scale, the output of the layer of S1 units. + c1_outputs : List of Tensors, shape (batch_size, num_orientations, height, width) + For each scale, the output of the layer of C1 units. + s2_outputs : List of lists of Tensors, shape (batch_size, num_patches, height, width) + For each C1 scale and each patch scale, the output of the layer of + S2 units. + c2_outputs : List of Tensors, shape (batch_size, num_patches) + For each patch scale, the output of the layer of C2 units. 
+ """ + s1_outputs = [s1(img) for s1 in self.s1_units] + self.s1_out(s1_outputs) + # Each C1 layer pools across two S1 layers + c1_outputs = [] + for c1, i in zip(self.c1_units, range(0, len(self.s1_units), 2)): + c1_outputs.append(c1(s1_outputs[i:i + 2])) + self.c1_out(c1_outputs) + s2_outputs = [s2(c1_outputs) for s2 in self.s2_units] + self.s2_out(s2_outputs) + c2_outputs = [c2(s2) for c2, s2 in zip(self.c2_units, s2_outputs)] + self.c2_out(c2_outputs) + + return s1_outputs, c1_outputs, s2_outputs, c2_outputs + + def forward(self, img): + """Run through everything and concatenate the output of the C2s.""" + img = img.mean(axis=1).reshape(img.shape[0], 1, img.shape[-1], img.shape[-1]) + c2_outputs = self.run_all_layers(img)[-1] + c2_outputs = torch.cat( + [c2_out[:, None, :] for c2_out in c2_outputs], 1) + return c2_outputs + + def get_all_layers(self, img): + """Get the activation for all layers as NumPy arrays. + Parameters + ---------- + img : Tensor, shape (batch_size, 1, height, width) + A batch of images to run through the model + Returns + ------- + s1_outputs : List of arrays, shape (batch_size, num_orientations, height, width) + For each scale, the output of the layer of S1 units. + c1_outputs : List of arrays, shape (batch_size, num_orientations, height, width) + For each scale, the output of the layer of C1 units. + s2_outputs : List of lists of arrays, shape (batch_size, num_patches, height, width) + For each C1 scale and each patch scale, the output of the layer of + S2 units. + c2_outputs : List of arrays, shape (batch_size, num_patches) + For each patch scale, the output of the layer of C2 units. + """ + s1_out, c1_out, s2_out, c2_out = self.run_all_layers(img) + return ( + [s1.cpu().detach().numpy() for s1 in s1_out], + [c1.cpu().detach().numpy() for c1 in c1_out], + [[s2_.cpu().detach().numpy() for s2_ in s2] for s2 in s2_out], + [c2.cpu().detach().numpy() for c2 in c2_out], + ) diff --git a/brainscore_vision/models/hmax/helpers/pytorch.py b/brainscore_vision/models/hmax/helpers/pytorch.py new file mode 100644 index 000000000..0df64100c --- /dev/null +++ b/brainscore_vision/models/hmax/helpers/pytorch.py @@ -0,0 +1,216 @@ +import os +import logging +from collections import OrderedDict + +import numpy as np +import torch +from PIL import Image + +from brainscore_vision.model_helpers.activations.core import ActivationsExtractorHelper +from brainscore_vision.model_helpers.utils import fullname + +SUBMODULE_SEPARATOR = '.' 
+ + +class PytorchWrapper: + def __init__(self, model, preprocessing, identifier=None, *args, **kwargs): + import torch + logger = logging.getLogger(fullname(self)) + self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + logger.debug(f"Using device {self._device}") + self._model = model + self._model = self._model.to(self._device) + identifier = identifier or model.__class__.__name__ + self._extractor = self._build_extractor( + identifier=identifier, preprocessing=preprocessing, get_activations=self.get_activations, *args, **kwargs) + self._extractor.insert_attrs(self) + + def _build_extractor(self, identifier, preprocessing, get_activations, *args, **kwargs): + return ActivationsExtractorHelper( + identifier=identifier, get_activations=get_activations, preprocessing=preprocessing, + *args, **kwargs) + + @property + def identifier(self): + return self._extractor.identifier + + @identifier.setter + def identifier(self, value): + self._extractor.identifier = value + + def __call__(self, *args, **kwargs): + previous_value = os.getenv('RESULTCACHING_DISABLE', '') + os.environ['RESULTCACHING_DISABLE'] = 'model_tools.activations' + result = self._extractor(*args, **kwargs) + os.environ['RESULTCACHING_DISABLE'] = previous_value + return result + + def get_activations(self, images, layer_names): + import torch + from torch.autograd import Variable + images = [torch.from_numpy(image) for image in images] + images = Variable(torch.stack(images)) + images = images.to(self._device) + self._model.eval() + + layer_results = OrderedDict() + hooks = [] + + for layer_name in layer_names: + layer = self.get_layer(layer_name) + hook = self.register_hook(layer, layer_name, target_dict=layer_results) + hooks.append(hook) + + self._model(images) + for hook in hooks: + hook.remove() + return layer_results + + def get_layer(self, layer_name): + if layer_name == 'logits': + return self._output_layer() + module = self._model + for part in layer_name.split(SUBMODULE_SEPARATOR): + module = module._modules.get(part) + assert module is not None, f"No submodule found for layer {layer_name}, at part {part}" + return module + + def _output_layer(self): + module = self._model + while module._modules: + module = module._modules[next(reversed(module._modules))] + return module + + @classmethod + def set_weights(cls, output, shape): + if len(shape) == 5: + return torch.cat([out[:shape[0],None, :shape[1], :shape[2], :shape[3], :shape[4]] for out in output], 1) + if len(shape) == 4: + return torch.cat([out[:shape[0],None, :shape[1], :shape[2], :shape[3]] for out in output], 1) + elif len(shape) == 3: + return torch.cat([out[:shape[0],None,:shape[1], :shape[2]] for out in output], 1) + elif len(shape) == 2: + return torch.cat([out[:shape[0],None ,:shape[1]] for out in output], 1) + elif len(shape) == 1: + return torch.cat([out[:shape[0], None] for out in output], 1) + else: + raise ValueError(f"Unexpected shape {shape}") + + @classmethod + def _tensor_to_numpy(cls, output): + if isinstance(output, list): + zero = output[0] + if isinstance(zero, list): + length = len(zero[0].shape) + shape = np.full(length, 100000) + for li in output: + for item in li: + for j in range(len(item.shape)): + if shape[j] > item.shape[j]: + shape[j] = item.shape[j] + shape = shape.astype(int) + new_weights = [] + for i in range(len(output)): + outer = output[i] + new_weights.append(cls.set_weights(outer, shape)) + new_weights = cls.set_weights(new_weights, new_weights[0].shape) + if len(new_weights.shape) == 6: + new_weights = 
new_weights.reshape(new_weights.shape[0],new_weights.shape[1]*new_weights.shape[2]*new_weights.shape[3],new_weights.shape[4],new_weights.shape[5]) + elif len(new_weights.shape) == 5: + new_weights = new_weights.reshape(new_weights.shape[0],new_weights.shape[1]*new_weights.shape[2],new_weights.shape[3],new_weights.shape[4]) + elif len(new_weights.shape) == 4: + pass # ok already + elif len(new_weights.shape) == 3: + new_weights = new_weights.reshape(new_weights.shape[0],1,new_weights.shape[1],new_weights.shape[2]) + elif len(new_weights.shape) == 2: + new_weights = new_weights.reshape(new_weights.shape[0],1,1,new_weights.shape[1]) + else: + raise ValueError(f"Unexpected shape {new_weights.shape}") + return new_weights + else: + length = len(zero.shape) + shape = np.full(length, 10000) + for item in output: + for j in range(len(item.shape)): + if shape[j] > item.shape[j]: + shape[j] = item.shape[j] + shape = shape.astype(int) + new_weights = cls.set_weights(output, shape) + if len(new_weights.shape) > 4: + new_weights = new_weights.reshape(new_weights.shape[0],new_weights.shape[1]*new_weights.shape[2],new_weights.shape[3],new_weights.shape[4]) + if len(new_weights.shape) == 3: + new_weights = new_weights.reshape(new_weights.shape[0],1,new_weights.shape[1],new_weights.shape[2]) + if len(new_weights.shape) == 2: + new_weights = new_weights.reshape(new_weights.shape[0],1,1,new_weights.shape[1]) + return new_weights + return output.cpu().data.numpy() + + def register_hook(self, layer, layer_name, target_dict): + def hook_function(_layer, _input, output, name=layer_name): + target_dict[name] = PytorchWrapper._tensor_to_numpy(output) + + hook = layer.register_forward_hook(hook_function) + return hook + + def __repr__(self): + return repr(self._model) + + def layers(self): + for name, module in self._model.named_modules(): + if len(list(module.children())) > 0: # this module only holds other modules + continue + yield name, module + + def graph(self): + import networkx as nx + g = nx.DiGraph() + for layer_name, layer in self.layers(): + g.add_node(layer_name, object=layer, type=type(layer)) + return g + + +def load_preprocess_images(image_filepaths, image_size, **kwargs): + images = load_images(image_filepaths) + images = preprocess_images(images, image_size=image_size, **kwargs) + return images + + +def load_images(image_filepaths): + return [load_image(image_filepath) for image_filepath in image_filepaths] + + +def load_image(image_filepath): + with Image.open(image_filepath) as pil_image: + if 'L' not in pil_image.mode.upper() and 'A' not in pil_image.mode.upper(): # not binary and not alpha + # work around to https://github.com/python-pillow/Pillow/issues/1144, + # see https://stackoverflow.com/a/30376272/2225200 + return pil_image.copy() + else: # make sure potential binary images are in RGB + rgb_image = Image.new("RGB", pil_image.size) + rgb_image.paste(pil_image) + return rgb_image + + +def preprocess_images(images, image_size, **kwargs): + preprocess = torchvision_preprocess_input(image_size, **kwargs) + images = [preprocess(image) for image in images] + images = np.concatenate(images) + return images + + +def torchvision_preprocess_input(image_size, **kwargs): + from torchvision import transforms + return transforms.Compose([ + transforms.Resize((image_size, image_size)), + torchvision_preprocess(**kwargs), + ]) + + +def torchvision_preprocess(normalize_mean=(0.485, 0.456, 0.406), normalize_std=(0.229, 0.224, 0.225)): + from torchvision import transforms + return transforms.Compose([ + 
transforms.ToTensor(), + transforms.Normalize(mean=normalize_mean, std=normalize_std), + lambda img: img.unsqueeze(0) + ]) + diff --git a/brainscore_vision/models/hmax/model.py b/brainscore_vision/models/hmax/model.py new file mode 100644 index 000000000..028c914e2 --- /dev/null +++ b/brainscore_vision/models/hmax/model.py @@ -0,0 +1,69 @@ +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.check_submission import check_models +import functools +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +import ssl +from .helpers.hmax import HMAX +from .helpers.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.s3 import load_weight_file + + +ssl._create_default_https_context = ssl._create_unverified_context +model = None + + +def get_model(name): + assert name == 'hmax' + return get_hmax(name, 224) + + +def get_hmax(identifier, image_size): + weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models", + relative_path="hmax/universal_patch_set.mat", + version_id="fIX.lsvnc8qqjDr_sG_Dl9RyqWuG0OGC", + sha1="acc7316fcb0d1797486bb62753b71e158216a92a") + global model + model = HMAX(str(weights_path)) + + preprocessing = functools.partial(load_preprocess_images, image_size=image_size) + wrapper = PytorchWrapper(identifier=identifier, model=model, + preprocessing=preprocessing) + wrapper.image_size = image_size + return wrapper + + +def get_layers(name): + assert name == 'hmax' + global model + layer_names = [] + for name, module in model.named_modules(): + print(name) + layer_names.append(name) + + return layer_names[-8:] + + +def get_bibtex(model_identifier): + """ + A method returning the bibtex reference of the requested model as a string. + """ + return """@ARTICLE {, + author = {G. Cortelazzo and M. 
Balanza}, + journal = {IEEE Transactions on Pattern Analysis & Machine Intelligence}, + title = {Frequency Domain Analysis of Translations with Piecewise Cubic Trajectories}, + year = {1993}, + volume = {29}, + number = {04}, + issn = {1939-3539}, + pages = {411-416}, + keywords = {frequency domain motion analysis; motion estimation; translations; piecewise cubic trajectories; cubic spline trajectories; finite-duration effects; constant velocity motion; first-order model; frequency-domain analysis; motion estimation; splines (mathematics)}, + doi = {10.1109/34.206960}, + publisher = {IEEE Computer Society}, + address = {Los Alamitos, CA, USA}, + month = {apr} + } + """ + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/hmax/requirements.txt b/brainscore_vision/models/hmax/requirements.txt new file mode 100644 index 000000000..94fbf10ef --- /dev/null +++ b/brainscore_vision/models/hmax/requirements.txt @@ -0,0 +1,8 @@ +torchvision +torch +numpy +os +scipy +logging +PIL +collections \ No newline at end of file diff --git a/brainscore_vision/models/hmax/test.py b/brainscore_vision/models/hmax/test.py new file mode 100644 index 000000000..9b703f778 --- /dev/null +++ b/brainscore_vision/models/hmax/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('hmax') + assert model.identifier == 'hmax' \ No newline at end of file From b84502aad25c443419e59a9579cab82110db2323 Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Mon, 1 Jul 2024 10:11:27 -0400 Subject: [PATCH 17/68] Add ViT_L_32_imagenet1k (#956) * Add ViT_L_32_imagenet1k * Update brainscore_vision/models/ViT_L_32_imagenet1k/model.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/models/ViT_L_32_imagenet1k/model.py Co-authored-by: Martin Schrimpf --------- Co-authored-by: Ethan Pellegrini Co-authored-by: Martin Schrimpf Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> --- .../models/ViT_L_32_imagenet1k/__init__.py | 8 ++++ .../models/ViT_L_32_imagenet1k/model.py | 43 +++++++++++++++++++ .../ViT_L_32_imagenet1k/requirements.txt | 3 ++ .../models/ViT_L_32_imagenet1k/test.py | 8 ++++ 4 files changed, 62 insertions(+) create mode 100644 brainscore_vision/models/ViT_L_32_imagenet1k/__init__.py create mode 100644 brainscore_vision/models/ViT_L_32_imagenet1k/model.py create mode 100644 brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt create mode 100644 brainscore_vision/models/ViT_L_32_imagenet1k/test.py diff --git a/brainscore_vision/models/ViT_L_32_imagenet1k/__init__.py b/brainscore_vision/models/ViT_L_32_imagenet1k/__init__.py new file mode 100644 index 000000000..1ebcfe52d --- /dev/null +++ b/brainscore_vision/models/ViT_L_32_imagenet1k/__init__.py @@ -0,0 +1,8 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + + +model_registry['ViT_L_32_imagenet1k'] = lambda: ModelCommitment(identifier='ViT_L_32_imagenet1k', + activations_model=get_model('ViT_L_32_imagenet1k'), + layers=get_layers('ViT_L_32_imagenet1k')) \ No newline at end of file diff --git a/brainscore_vision/models/ViT_L_32_imagenet1k/model.py b/brainscore_vision/models/ViT_L_32_imagenet1k/model.py new file mode 100644 index 000000000..39bf52a97 --- /dev/null +++ 
b/brainscore_vision/models/ViT_L_32_imagenet1k/model.py @@ -0,0 +1,43 @@ +import functools +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from brainscore_vision.model_helpers.check_submission import check_models +from pytorch_pretrained_vit import ViT +import ssl + + +ssl._create_default_https_context = ssl._create_unverified_context + + +# Visual Transformer (ViT) +# Using PyTorch implementation and converted weights from https://github.com/lukemelas/PyTorch-Pretrained-ViT + +def get_model(name): + assert name == 'ViT_L_32_imagenet1k' + model = ViT(name[4:], pretrained=True) + preprocessing = functools.partial(load_preprocess_images, image_size=model.image_size[0]) + wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing) + wrapper.image_size = model.image_size[0] + return wrapper + + +def get_layers(name): + assert name == 'ViT_L_32_imagenet1k' + name = name[4:] + number_of_blocks = 12 if name.startswith('B') else 24 + return [f'transformer.blocks.{i}.pwff.fc2' for i in range(number_of_blocks)] + + +def get_bibtex(model_identifier): + return """@inproceedings{ + dosovitskiy2021an, + title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale}, + author={Alexey Dosovitskiy and Lucas Beyer and Alexander Kolesnikov and Dirk Weissenborn and Xiaohua Zhai and Thomas Unterthiner and Mostafa Dehghani and Matthias Minderer and Georg Heigold and Sylvain Gelly and Jakob Uszkoreit and Neil Houlsby}, + booktitle={International Conference on Learning Representations}, + year={2021}, + url={https://openreview.net/forum?id=YicbFdNTTy} + }""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) \ No newline at end of file diff --git a/brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt b/brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt new file mode 100644 index 000000000..bd7ed7583 --- /dev/null +++ b/brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt @@ -0,0 +1,3 @@ +numpy +torch +pytorch_pretrained_vit \ No newline at end of file diff --git a/brainscore_vision/models/ViT_L_32_imagenet1k/test.py b/brainscore_vision/models/ViT_L_32_imagenet1k/test.py new file mode 100644 index 000000000..cbb3d68ca --- /dev/null +++ b/brainscore_vision/models/ViT_L_32_imagenet1k/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('ViT_L_32_imagenet1k') + assert model.identifier == 'ViT_L_32_imagenet1k' \ No newline at end of file From 6255370094a72195b3a46b504ca5d84a6c20de30 Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Mon, 1 Jul 2024 14:44:07 -0400 Subject: [PATCH 18/68] Ep/add antialias resnet152 (#952) * Add antialias-resnet152 * Update model.py * Removing dash from python files to make module name valid --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Co-authored-by: Deirdre Kelliher --- .../models/antialias_resnet152/__init__.py | 7 ++++ .../models/antialias_resnet152/model.py | 35 +++++++++++++++++++ .../antialias_resnet152/requirements.txt | 3 ++ .../models/antialias_resnet152/test.py | 8 +++++ 4 files changed, 53 insertions(+) create mode 100644 brainscore_vision/models/antialias_resnet152/__init__.py create mode 100644 
brainscore_vision/models/antialias_resnet152/model.py create mode 100644 brainscore_vision/models/antialias_resnet152/requirements.txt create mode 100644 brainscore_vision/models/antialias_resnet152/test.py diff --git a/brainscore_vision/models/antialias_resnet152/__init__.py b/brainscore_vision/models/antialias_resnet152/__init__.py new file mode 100644 index 000000000..3dd83c58b --- /dev/null +++ b/brainscore_vision/models/antialias_resnet152/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['antialias-resnet152'] = lambda: ModelCommitment(identifier='antialias-resnet152', + activations_model=get_model('antialias-resnet152'), + layers=get_layers('antialias-resnet152')) \ No newline at end of file diff --git a/brainscore_vision/models/antialias_resnet152/model.py b/brainscore_vision/models/antialias_resnet152/model.py new file mode 100644 index 000000000..66ae69a52 --- /dev/null +++ b/brainscore_vision/models/antialias_resnet152/model.py @@ -0,0 +1,35 @@ +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +import ssl +import functools +import antialiased_cnns +from brainscore_vision.model_helpers.check_submission import check_models + +ssl._create_default_https_context = ssl._create_unverified_context + + +def get_model(name): + assert name == 'antialias-resnet152' + model = antialiased_cnns.resnet152(pretrained=True) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier='antialiased-resnet-152', model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + + +def get_layers(name): + assert name == 'antialias-resnet152' + return ['layer1.1', 'layer1.2', 'layer2.1', 'layer2.2', 'layer2.3', 'layer2.4', + 'layer2.5', 'layer2.6', 'layer2.7', 'layer3.0'] + ['layer3.' + str(i) for i in range(1, 36, 6)] + [ + 'layer3.35', 'layer4.1', 'layer4.2', 'avgpool', 'fc'] + + +def get_bibtex(model_identifier): + """ + A method returning the bibtex reference of the requested model as a string. 
+ """ + return """""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/antialias_resnet152/requirements.txt b/brainscore_vision/models/antialias_resnet152/requirements.txt new file mode 100644 index 000000000..4261bdb79 --- /dev/null +++ b/brainscore_vision/models/antialias_resnet152/requirements.txt @@ -0,0 +1,3 @@ +torchvision +torch +antialiased-cnns \ No newline at end of file diff --git a/brainscore_vision/models/antialias_resnet152/test.py b/brainscore_vision/models/antialias_resnet152/test.py new file mode 100644 index 000000000..a0df07447 --- /dev/null +++ b/brainscore_vision/models/antialias_resnet152/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('antialias-resnet152') + assert model.identifier == 'antialias-resnet152' \ No newline at end of file From 3b713e8fedab5c13e0fbb0acef0462821d3c94a0 Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Mon, 1 Jul 2024 15:21:53 -0400 Subject: [PATCH 19/68] Add mobilenet_v2_1-4_244 (#995) * Add model files * Update model.py * Replace hyphen --------- Co-authored-by: Ethan Pellegrini --- .../mobilenet_v2_1_4_224_pytorch/__init__.py | 7 +++ .../mobilenet_v2_1_4_224_pytorch/model.py | 59 +++++++++++++++++++ .../requirements.txt | 3 + .../mobilenet_v2_1_4_224_pytorch/test.py | 8 +++ 4 files changed, 77 insertions(+) create mode 100644 brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py create mode 100644 brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py create mode 100644 brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/requirements.txt create mode 100644 brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py diff --git a/brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py b/brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py new file mode 100644 index 000000000..4d4eed4b6 --- /dev/null +++ b/brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['mobilenet_v2_1-4_224_pytorch'] = lambda: ModelCommitment(identifier='mobilenet_v2_1-4_224_pytorch', + activations_model=get_model('mobilenet_v2_1-4_224_pytorch'), + layers=get_layers('mobilenet_v2_1-4_224_pytorch')) \ No newline at end of file diff --git a/brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py b/brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py new file mode 100644 index 000000000..0901970df --- /dev/null +++ b/brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py @@ -0,0 +1,59 @@ +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_images, load_preprocess_images +from brainscore_vision.model_helpers.check_submission import check_models +import ssl +from transformers import MobileNetV2ForImageClassification +import functools + +ssl._create_default_https_context = ssl._create_unverified_context + +''' +This is a Pytorch implementation of mobilenet_v2_1.0_224. 
+ +Previously on Brain-Score, this model existed as a Tensorflow model, and was converted via: + https://huggingface.co/Matthijs/mobilenet_v2_1.4_224 + +Disclaimer: This (pytorch) implementation's Brain-Score scores might not align identically with Tensorflow +implementation. + +''' + + +MODEL = MobileNetV2ForImageClassification.from_pretrained("Matthijs/mobilenet_v2_1.4_224") + + +def get_model(name): + assert name == 'mobilenet_v2_1-4_224_pytorch' + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier='mobilenet_v2_1-4_224_pytorch', model=MODEL, + preprocessing=preprocessing, + batch_size=4) # doesn't fit into 12 GB GPU memory otherwise + wrapper.image_size = 224 + return wrapper + + +def get_layers(name): + assert name == 'mobilenet_v2_1-4_224_pytorch' + layer_names = [] + + for name, module in MODEL.named_modules(): + layer_names.append(name) + + return layer_names[-50:] + + +def get_bibtex(model_identifier): + """ + A method returning the bibtex reference of the requested model as a string. + """ + return """@inproceedings{mobilenetv22018, + title={MobileNetV2: Inverted Residuals and Linear Bottlenecks}, + author={Mark Sandler and Andrew Howard and Menglong Zhu and Andrey Zhmoginov and Liang-Chieh Chen}, + booktitle={CVPR}, + year={2018} + } + """ + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/requirements.txt b/brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/requirements.txt new file mode 100644 index 000000000..1d28db41f --- /dev/null +++ b/brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/requirements.txt @@ -0,0 +1,3 @@ +torchvision +torch +transformers \ No newline at end of file diff --git a/brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py b/brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py new file mode 100644 index 000000000..7d289e20c --- /dev/null +++ b/brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('mobilenet_v2_1-4_224_pytorch') + assert model.identifier == 'mobilenet_v2_1-4_224_pytorch' \ No newline at end of file From 8a54e56b0eadb5774150048fc2a0dfbd1ca74ea6 Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Mon, 1 Jul 2024 15:22:15 -0400 Subject: [PATCH 20/68] Add reset-152_v2 (#993) * Add model files * removing dash from package name --------- Co-authored-by: Ethan Pellegrini Co-authored-by: Deirdre Kelliher --- .../models/resnet_152_v2_pytorch/__init__.py | 7 +++ .../models/resnet_152_v2_pytorch/model.py | 59 +++++++++++++++++++ .../resnet_152_v2_pytorch/requirements.txt | 2 + .../models/resnet_152_v2_pytorch/test.py | 8 +++ 4 files changed, 76 insertions(+) create mode 100644 brainscore_vision/models/resnet_152_v2_pytorch/__init__.py create mode 100644 brainscore_vision/models/resnet_152_v2_pytorch/model.py create mode 100644 brainscore_vision/models/resnet_152_v2_pytorch/requirements.txt create mode 100644 brainscore_vision/models/resnet_152_v2_pytorch/test.py diff --git a/brainscore_vision/models/resnet_152_v2_pytorch/__init__.py b/brainscore_vision/models/resnet_152_v2_pytorch/__init__.py new file mode 100644 index 000000000..1ae97a7a0 --- /dev/null +++ b/brainscore_vision/models/resnet_152_v2_pytorch/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision import 
model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['resnet-152_v2_pytorch'] = lambda: ModelCommitment(identifier='resnet-152_v2_pytorch', + activations_model=get_model('resnet-152_v2_pytorch'), + layers=get_layers('resnet-152_v2_pytorch')) diff --git a/brainscore_vision/models/resnet_152_v2_pytorch/model.py b/brainscore_vision/models/resnet_152_v2_pytorch/model.py new file mode 100644 index 000000000..d62717e55 --- /dev/null +++ b/brainscore_vision/models/resnet_152_v2_pytorch/model.py @@ -0,0 +1,59 @@ +import functools +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from brainscore_vision.model_helpers.check_submission import check_models +import torchvision +import ssl + + +ssl._create_default_https_context = ssl._create_unverified_context + +''' +This is a Pytorch implementation of resnet-152_v2. + +Previously on Brain-Score, this model existed as a Tensorflow model, and was converted via: + https://pytorch.org/vision/main/models/generated/torchvision.models.resnet152.html + +Disclaimer: This (pytorch) implementation's Brain-Score scores might not align identically with Tensorflow +implementation. + +''' + +MODEL = torchvision.models.resnet152(weights='ResNet152_Weights.IMAGENET1K_V2') # use V2 weights + +def get_model(name): + assert name == 'resnet-152_v2_pytorch' + model_identifier = "resnet-152_v2_pytorch" + preprocessing = functools.partial(load_preprocess_images, image_size=299) + wrapper = PytorchWrapper(identifier=model_identifier, model=MODEL, preprocessing=preprocessing) + wrapper.image_size = 299 + return wrapper + + +def get_layers(name): + assert name == 'resnet-152_v2_pytorch' + layer_names = [] + + for name, module in MODEL.named_modules(): + layer_names.append(name) + + return layer_names[-15:] + + +def get_bibtex(model_identifier): + """ + A method returning the bibtex reference of the requested model as a string. 
+ """ + return """@misc{he2016identity, + title={Identity Mappings in Deep Residual Networks}, + author={Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, + year={2016}, + eprint={1603.05027}, + archivePrefix={arXiv}, + primaryClass={cs.CV} + } + """ + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/resnet_152_v2_pytorch/requirements.txt b/brainscore_vision/models/resnet_152_v2_pytorch/requirements.txt new file mode 100644 index 000000000..a56666d38 --- /dev/null +++ b/brainscore_vision/models/resnet_152_v2_pytorch/requirements.txt @@ -0,0 +1,2 @@ +torchvision +torch \ No newline at end of file diff --git a/brainscore_vision/models/resnet_152_v2_pytorch/test.py b/brainscore_vision/models/resnet_152_v2_pytorch/test.py new file mode 100644 index 000000000..6564f28ed --- /dev/null +++ b/brainscore_vision/models/resnet_152_v2_pytorch/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('resnet-152_v2_pytorch') + assert model.identifier == 'resnet-152_v2_pytorch' \ No newline at end of file From e805d01eabfb33a7f8d44115ea0f62e33ae1bfa0 Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Mon, 1 Jul 2024 15:22:38 -0400 Subject: [PATCH 21/68] Add AdvProp_efficientnet-b6 (#942) * Add AdvProp_efficientnet-b6 * removing dash from package name --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Co-authored-by: Deirdre Kelliher --- .../models/AdvProp_efficientne_b6/__init__.py | 5 ++ .../models/AdvProp_efficientne_b6/model.py | 75 +++++++++++++++++++ .../AdvProp_efficientne_b6/requirements.txt | 1 + .../models/AdvProp_efficientne_b6/test.py | 9 +++ 4 files changed, 90 insertions(+) create mode 100644 brainscore_vision/models/AdvProp_efficientne_b6/__init__.py create mode 100644 brainscore_vision/models/AdvProp_efficientne_b6/model.py create mode 100644 brainscore_vision/models/AdvProp_efficientne_b6/requirements.txt create mode 100644 brainscore_vision/models/AdvProp_efficientne_b6/test.py diff --git a/brainscore_vision/models/AdvProp_efficientne_b6/__init__.py b/brainscore_vision/models/AdvProp_efficientne_b6/__init__.py new file mode 100644 index 000000000..2dbeb2005 --- /dev/null +++ b/brainscore_vision/models/AdvProp_efficientne_b6/__init__.py @@ -0,0 +1,5 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['AdvProp_efficientnet-b6'] = lambda: ModelCommitment(identifier='AdvProp_efficientnet-b6', activations_model=get_model('AdvProp_efficientnet-b6'), layers=get_layers('AdvProp_efficientnet-b6')) diff --git a/brainscore_vision/models/AdvProp_efficientne_b6/model.py b/brainscore_vision/models/AdvProp_efficientne_b6/model.py new file mode 100644 index 000000000..bf29e94ec --- /dev/null +++ b/brainscore_vision/models/AdvProp_efficientne_b6/model.py @@ -0,0 +1,75 @@ +import functools + +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.check_submission import check_models +from efficientnet_pytorch import EfficientNet + +""" +Template module for a base model submission to brain-score +""" + +import ssl 
+ssl._create_default_https_context = ssl._create_unverified_context + +def get_model(name): + """ + This method fetches an instance of a base model. The instance has to be callable and return a xarray object, + containing activations. There exist standard wrapper implementations for common libraries, like pytorch and + keras. Checkout the examples folder, to see more. For custom implementations check out the implementation of the + wrappers. + :param name: the name of the model to fetch + :return: the model instance + """ + model = name + AT = False + if 'AdvProp_' in name: + AT = True + model = model.split('AdvProp_')[-1] + + model = EfficientNet.from_pretrained(model, advprop=AT) + model.set_swish(memory_efficient=False) + + if AT: + preprocessing = functools.partial(load_preprocess_images, image_size=224, normalize_mean=(0.5, 0.5, 0.5), normalize_std=(0.5, 0.5, 0.5)) + else: + preprocessing = functools.partial(load_preprocess_images, image_size=224) + + wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing) + from types import MethodType + def _output_layer(self): + return self._model._fc + + wrapper._output_layer = MethodType(_output_layer, wrapper) + wrapper.image_size = 224 + return wrapper + +def get_layers(name): + """ + This method returns a list of string layer names to consider per model. The benchmarks maps brain regions to + layers and uses this list as a set of possible layers. The lists doesn't have to contain all layers, the less the + faster the benchmark process works. Additionally the given layers have to produce an activations vector of at least + size 25! The layer names are delivered back to the model instance and have to be resolved in there. For a pytorch + model, the layer name are for instance dot concatenated per module, e.g. "features.2". + :param name: the name of the model, to return the layers for + :return: a list of strings containing all layers, that should be considered as brain area. + """ + lmap = { + 'efficientnet-b6' : [f'_blocks.{i}' for i in range(45)] + } + name = name.split('AdvProp_')[-1] + assert name in lmap + return lmap[name] + + +def get_bibtex(model_identifier): + """ + A method returning the bibtex reference of the requested model as a string. + """ + return '' + + +if __name__ == '__main__': + # Use this method to ensure the correctness of the BaseModel implementations. + # It executes a mock run of brain-score benchmarks. 
+ check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/AdvProp_efficientne_b6/requirements.txt b/brainscore_vision/models/AdvProp_efficientne_b6/requirements.txt new file mode 100644 index 000000000..f09a81cec --- /dev/null +++ b/brainscore_vision/models/AdvProp_efficientne_b6/requirements.txt @@ -0,0 +1 @@ +efficientnet_pytorch \ No newline at end of file diff --git a/brainscore_vision/models/AdvProp_efficientne_b6/test.py b/brainscore_vision/models/AdvProp_efficientne_b6/test.py new file mode 100644 index 000000000..206b4315a --- /dev/null +++ b/brainscore_vision/models/AdvProp_efficientne_b6/test.py @@ -0,0 +1,9 @@ +# Left empty as part of 2023 models migration + +import pytest +import brainscore_vision + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('AdvProp_efficientnet-b6') + assert model.identifier == 'AdvProp_efficientnet-b6' \ No newline at end of file From 2c1e44df719e4f64ce47bb516e2e75e9f364c6c1 Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Mon, 1 Jul 2024 15:34:11 -0400 Subject: [PATCH 22/68] Add antialiased-rnext101_32x8d (#960) * Add antialiased-rnext101_32x8d * Fix import * Remove redundant package installs * removing dash from package name --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Co-authored-by: Deirdre Kelliher --- .../antialiased_rnext101_32x8d/__init__.py | 7 ++++ .../antialiased_rnext101_32x8d/model.py | 35 +++++++++++++++++++ .../requirements.txt | 1 + .../models/antialiased_rnext101_32x8d/test.py | 8 +++++ 4 files changed, 51 insertions(+) create mode 100644 brainscore_vision/models/antialiased_rnext101_32x8d/__init__.py create mode 100644 brainscore_vision/models/antialiased_rnext101_32x8d/model.py create mode 100644 brainscore_vision/models/antialiased_rnext101_32x8d/requirements.txt create mode 100644 brainscore_vision/models/antialiased_rnext101_32x8d/test.py diff --git a/brainscore_vision/models/antialiased_rnext101_32x8d/__init__.py b/brainscore_vision/models/antialiased_rnext101_32x8d/__init__.py new file mode 100644 index 000000000..658480430 --- /dev/null +++ b/brainscore_vision/models/antialiased_rnext101_32x8d/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['antialiased-rnext101_32x8d'] = lambda: ModelCommitment(identifier='antialiased-rnext101_32x8d', + activations_model=get_model('antialiased-rnext101_32x8d'), + layers=get_layers('antialiased-rnext101_32x8d')) \ No newline at end of file diff --git a/brainscore_vision/models/antialiased_rnext101_32x8d/model.py b/brainscore_vision/models/antialiased_rnext101_32x8d/model.py new file mode 100644 index 000000000..db367d89b --- /dev/null +++ b/brainscore_vision/models/antialiased_rnext101_32x8d/model.py @@ -0,0 +1,35 @@ +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +import ssl +import functools +import antialiased_cnns +from brainscore_vision.model_helpers.check_submission import check_models + +ssl._create_default_https_context = ssl._create_unverified_context + +def get_model(name): + assert name == 'antialiased-rnext101_32x8d' + model = antialiased_cnns.resnext101_32x8d(pretrained=True) + preprocessing = 
functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier='antialiased-rnext101_32x8d', model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + + +def get_layers(name): + assert name == 'antialiased-rnext101_32x8d' + return ['maxpool', 'layer1.0', 'layer1.1', 'layer1.2', + 'layer2.0', 'layer2.1', 'layer2.2', 'layer2.3'] + ['layer3.' + str(i) for i in range(23)] + ['layer4.0', + 'layer4.1', + 'layer4.2', + 'avgpool', + 'fc'] + +def get_bibtex(model_identifier): + """ + A method returning the bibtex reference of the requested model as a string. + """ + return """""" + +if __name__ == '__main__': + check_models.check_base_models(__name__) \ No newline at end of file diff --git a/brainscore_vision/models/antialiased_rnext101_32x8d/requirements.txt b/brainscore_vision/models/antialiased_rnext101_32x8d/requirements.txt new file mode 100644 index 000000000..e5f287297 --- /dev/null +++ b/brainscore_vision/models/antialiased_rnext101_32x8d/requirements.txt @@ -0,0 +1 @@ +antialiased-cnns diff --git a/brainscore_vision/models/antialiased_rnext101_32x8d/test.py b/brainscore_vision/models/antialiased_rnext101_32x8d/test.py new file mode 100644 index 000000000..95fbbbf3f --- /dev/null +++ b/brainscore_vision/models/antialiased_rnext101_32x8d/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('antialiased-rnext101_32x8d') + assert model.identifier == 'antialiased-rnext101_32x8d' \ No newline at end of file From 1b6837a3cf6db1cc00dc6a429a31b1653d7e1fe6 Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Mon, 1 Jul 2024 15:53:44 -0400 Subject: [PATCH 23/68] Ep/add custom model cv 18 dagger 408 (#951) * Add custom_model_cv_18_dagger_408 * Update model.py --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> --- .../custom_model_cv_18_dagger_408/__init__.py | 7 ++ .../custom_model_cv_18_dagger_408/model.py | 75 +++++++++++++++++++ .../requirements.txt | 4 + .../custom_model_cv_18_dagger_408/test.py | 8 ++ 4 files changed, 94 insertions(+) create mode 100644 brainscore_vision/models/custom_model_cv_18_dagger_408/__init__.py create mode 100644 brainscore_vision/models/custom_model_cv_18_dagger_408/model.py create mode 100644 brainscore_vision/models/custom_model_cv_18_dagger_408/requirements.txt create mode 100644 brainscore_vision/models/custom_model_cv_18_dagger_408/test.py diff --git a/brainscore_vision/models/custom_model_cv_18_dagger_408/__init__.py b/brainscore_vision/models/custom_model_cv_18_dagger_408/__init__.py new file mode 100644 index 000000000..b897c2013 --- /dev/null +++ b/brainscore_vision/models/custom_model_cv_18_dagger_408/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['custom_model_cv_18_dagger_408'] = lambda: ModelCommitment(identifier='custom_model_cv_18_dagger_408', + activations_model=get_model('custom_model_cv_18_dagger_408'), + layers=get_layers('custom_model_cv_18_dagger_408')) \ No newline at end of file diff --git a/brainscore_vision/models/custom_model_cv_18_dagger_408/model.py b/brainscore_vision/models/custom_model_cv_18_dagger_408/model.py new file mode 100644 index 000000000..1fc114081 --- /dev/null +++ 
b/brainscore_vision/models/custom_model_cv_18_dagger_408/model.py @@ -0,0 +1,75 @@ +import functools +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.check_submission import check_models +import torch +from brainscore_vision.model_helpers.s3 import load_weight_file +from timm.models import create_model +from brainscore_vision.model_helpers.activations.pytorch import load_images +import ssl +import numpy as np +from torchvision import transforms + + +ssl._create_default_https_context = ssl._create_unverified_context +INPUT_SIZE = 256 +BATCH_SIZE = 64 +LAYERS = ['blocks.1.blocks.1.4.norm2'] + + +def preprocess_images(images, image_size, **kwargs): + preprocess = torchvision_preprocess_input(image_size, **kwargs) + images = [preprocess(image) for image in images] + images = np.concatenate(images) + return images + + +def torchvision_preprocess_input(image_size, **kwargs): + return transforms.Compose([ + transforms.Resize((INPUT_SIZE, INPUT_SIZE)), + transforms.CenterCrop((image_size,image_size)), + torchvision_preprocess(**kwargs), + ]) + + +def torchvision_preprocess(normalize_mean=(0.485, 0.456, 0.406), normalize_std=(0.229, 0.224, 0.225)): + return transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=normalize_mean, std=normalize_std), + lambda img: img.unsqueeze(0) + ]) + + +def load_preprocess_custom_model(image_filepaths, image_size, **kwargs): + images = load_images(image_filepaths) + images = preprocess_images(images, image_size=image_size, **kwargs) + return images + +def get_model(name): + assert name == 'custom_model_cv_18_dagger_408' + model = create_model('crossvit_18_dagger_408', pretrained=False) + weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models", + relative_path="custom_model_cv_18_dagger_408/crossvit_18_dagger_408_adv_finetuned_epoch5.pt", + version_id="pQVPFC_iiWpRRr7P54qxQfRzjNSn2uYB", + sha1="c769518485e352d5a2e6f3e588d6208cbad71b69") + checkpoint = torch.load(weights_path, map_location=torch.device('cpu')) + model.load_state_dict(checkpoint['state_dict'], strict=True) + model.eval() + preprocessing = functools.partial(load_preprocess_custom_model, image_size=224) + activations_model = PytorchWrapper(identifier='custom_model_cv_18_dagger_408', model=model, + preprocessing=preprocessing, batch_size=BATCH_SIZE) + wrapper = activations_model + wrapper.image_size = 224 + return wrapper + + +def get_layers(name): + assert name == 'custom_model_cv_18_dagger_408' + return LAYERS + + +def get_bibtex(model_identifier): + return """""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/custom_model_cv_18_dagger_408/requirements.txt b/brainscore_vision/models/custom_model_cv_18_dagger_408/requirements.txt new file mode 100644 index 000000000..2ecd587ef --- /dev/null +++ b/brainscore_vision/models/custom_model_cv_18_dagger_408/requirements.txt @@ -0,0 +1,4 @@ +torchvision +torch +timm +numpy diff --git a/brainscore_vision/models/custom_model_cv_18_dagger_408/test.py b/brainscore_vision/models/custom_model_cv_18_dagger_408/test.py new file mode 100644 index 000000000..618332e33 --- /dev/null +++ b/brainscore_vision/models/custom_model_cv_18_dagger_408/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('custom_model_cv_18_dagger_408') + assert model.identifier == 'custom_model_cv_18_dagger_408' \ 
No newline at end of file From b340e051cbb50328f8ab40591ef2fa0b2292aa66 Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Mon, 1 Jul 2024 16:34:26 -0400 Subject: [PATCH 24/68] Add resnext101_32x16d_wsl (#941) * Add resnext101_32x16d_wsl * Remove get_model_list --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> --- .../models/resnext101_32x16d_wsl/__init__.py | 7 ++++ .../models/resnext101_32x16d_wsl/model.py | 38 +++++++++++++++++++ .../resnext101_32x16d_wsl/requirements.txt | 2 + .../models/resnext101_32x16d_wsl/test.py | 8 ++++ 4 files changed, 55 insertions(+) create mode 100644 brainscore_vision/models/resnext101_32x16d_wsl/__init__.py create mode 100644 brainscore_vision/models/resnext101_32x16d_wsl/model.py create mode 100644 brainscore_vision/models/resnext101_32x16d_wsl/requirements.txt create mode 100644 brainscore_vision/models/resnext101_32x16d_wsl/test.py diff --git a/brainscore_vision/models/resnext101_32x16d_wsl/__init__.py b/brainscore_vision/models/resnext101_32x16d_wsl/__init__.py new file mode 100644 index 000000000..bc36b3641 --- /dev/null +++ b/brainscore_vision/models/resnext101_32x16d_wsl/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['resnext101_32x16d_wsl'] = lambda: ModelCommitment(identifier='resnext101_32x16d_wsl', + activations_model=get_model('resnext101_32x16d_wsl'), + layers=get_layers('resnext101_32x16d_wsl')) \ No newline at end of file diff --git a/brainscore_vision/models/resnext101_32x16d_wsl/model.py b/brainscore_vision/models/resnext101_32x16d_wsl/model.py new file mode 100644 index 000000000..744b10b2f --- /dev/null +++ b/brainscore_vision/models/resnext101_32x16d_wsl/model.py @@ -0,0 +1,38 @@ +import functools +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +import torch.hub +import ssl +from brainscore_vision.model_helpers.check_submission import check_models + + +ssl._create_default_https_context = ssl._create_unverified_context + + +def get_model(name): + assert name == 'resnext101_32x16d_wsl' + model_identifier = "resnext101_32x16d_wsl" + model = torch.hub.load('facebookresearch/WSL-Images', model_identifier) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + batch_size = {8: 32, 16: 16, 32: 8, 48: 4} + wrapper = PytorchWrapper(identifier=model_identifier, model=model, preprocessing=preprocessing, + batch_size=batch_size[16]) + wrapper.image_size = 224 + return wrapper + + +def get_layers(name): + assert name == 'resnext101_32x16d_wsl' + return (['conv1'] + + # note that while relu is used multiple times, by default the last one will overwrite all previous ones + [f"layer{block + 1}.{unit}.relu" + for block, block_units in enumerate([3, 4, 23, 3]) for unit in range(block_units)] + + ['avgpool']) + + +def get_bibtex(model_identifier): + return """x""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/resnext101_32x16d_wsl/requirements.txt b/brainscore_vision/models/resnext101_32x16d_wsl/requirements.txt new file mode 100644 index 000000000..a56666d38 --- /dev/null +++ b/brainscore_vision/models/resnext101_32x16d_wsl/requirements.txt @@ -0,0 +1,2 @@ +torchvision 
+torch \ No newline at end of file diff --git a/brainscore_vision/models/resnext101_32x16d_wsl/test.py b/brainscore_vision/models/resnext101_32x16d_wsl/test.py new file mode 100644 index 000000000..9aa5a490b --- /dev/null +++ b/brainscore_vision/models/resnext101_32x16d_wsl/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('resnext101_32x16d_wsl') + assert model.identifier == 'resnext101_32x16d_wsl' \ No newline at end of file From a434e11735213a4c6ce4047813a60a718022492a Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Mon, 1 Jul 2024 16:55:14 -0400 Subject: [PATCH 25/68] Add CORnet-S (#958) * Add CORnet-S * Fix imports * Fix syntax in requirements.txt * Try cornet in lower case * Reverting to original format * Update requirements.txt * Remove incompatible versions * Update identifier * unpinning torch --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Co-authored-by: Deirdre Kelliher --- brainscore_vision/models/cornet_s/__init__.py | 8 + .../models/cornet_s/helpers/helpers.py | 205 ++++++++++++++++++ brainscore_vision/models/cornet_s/model.py | 77 +++++++ .../models/cornet_s/requirements.txt | 7 + brainscore_vision/models/cornet_s/test.py | 8 + 5 files changed, 305 insertions(+) create mode 100644 brainscore_vision/models/cornet_s/__init__.py create mode 100644 brainscore_vision/models/cornet_s/helpers/helpers.py create mode 100644 brainscore_vision/models/cornet_s/model.py create mode 100644 brainscore_vision/models/cornet_s/requirements.txt create mode 100644 brainscore_vision/models/cornet_s/test.py diff --git a/brainscore_vision/models/cornet_s/__init__.py b/brainscore_vision/models/cornet_s/__init__.py new file mode 100644 index 000000000..8eb1c02f6 --- /dev/null +++ b/brainscore_vision/models/cornet_s/__init__.py @@ -0,0 +1,8 @@ +from brainscore_vision import model_registry +from .helpers.helpers import CORnetCommitment, _build_time_mappings +from .model import get_model, get_layers, TIME_MAPPINGS + + +model_registry['CORnet-S'] = lambda: CORnetCommitment(identifier='CORnet-S', activations_model=get_model('CORnet-S'), + layers=get_layers('CORnet-S'), + time_mapping=_build_time_mappings(TIME_MAPPINGS)) \ No newline at end of file diff --git a/brainscore_vision/models/cornet_s/helpers/helpers.py b/brainscore_vision/models/cornet_s/helpers/helpers.py new file mode 100644 index 000000000..5896135f3 --- /dev/null +++ b/brainscore_vision/models/cornet_s/helpers/helpers.py @@ -0,0 +1,205 @@ +from collections import defaultdict +from typing import Dict, Tuple +from brainscore_vision.model_helpers.brain_transformation.behavior import BehaviorArbiter, LogitsBehavior, ProbabilitiesMapping +from result_caching import store +from tqdm import tqdm +from brainscore_vision.model_helpers.activations.core import ActivationsExtractorHelper +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_interface import BrainModel +from brainio.assemblies import merge_data_arrays, NeuroidAssembly, walk_coords +import re +import numpy as np + + +class TemporalPytorchWrapper(PytorchWrapper): + def __init__(self, *args, separate_time=True, **kwargs): + self._separate_time = separate_time + super(TemporalPytorchWrapper, self).__init__(*args, **kwargs) + + def _build_extractor(self, *args, **kwargs): + if self._separate_time: + return 
TemporalExtractor(*args, **kwargs) + else: + return super(TemporalPytorchWrapper, self)._build_extractor(*args, **kwargs) + + def get_activations(self, images, layer_names): + # reset + self._layer_counter = defaultdict(lambda: 0) + self._layer_hooks = {} + return super(TemporalPytorchWrapper, self).get_activations(images=images, layer_names=layer_names) + + def register_hook(self, layer, layer_name, target_dict): + layer_name = self._strip_layer_timestep(layer_name) + if layer_name in self._layer_hooks: # add hook only once for multiple timesteps + return self._layer_hooks[layer_name] + + def hook_function(_layer, _input, output): + target_dict[f"{layer_name}-t{self._layer_counter[layer_name]}"] = PytorchWrapper._tensor_to_numpy(output) + self._layer_counter[layer_name] += 1 + + hook = layer.register_forward_hook(hook_function) + self._layer_hooks[layer_name] = hook + return hook + + def get_layer(self, layer_name): + layer_name = self._strip_layer_timestep(layer_name) + return super(TemporalPytorchWrapper, self).get_layer(layer_name) + + def _strip_layer_timestep(self, layer_name): + match = re.search('-t[0-9]+$', layer_name) + if match: + layer_name = layer_name[:match.start()] + return layer_name + + +class CORnetCommitment(BrainModel): + """ + CORnet commitment where only the model interface is implemented and behavioral readouts are attached. + Importantly, layer-region commitments do not occur due to the anatomical pre-mapping. + Further, due to the temporal component of the model, requested time-bins are matched to the nearest committed + time-bin for the model. + """ + + def __init__(self, identifier, activations_model, layers, + time_mapping: Dict[str, Dict[int, Tuple[int, int]]], behavioral_readout_layer=None, + visual_degrees=8): + """ + :param time_mapping: mapping from region -> {model_timestep -> (time_bin_start, time_bin_end)} + """ + self.layers = layers + self.region_assemblies = {} + self.activations_model = activations_model + self.time_mapping = time_mapping + self.recording_layers = None + self.recording_time_bins = None + self._identifier = identifier + + logits_behavior = LogitsBehavior( + identifier=identifier, activations_model=TemporalIgnore(activations_model)) + behavioral_readout_layer = behavioral_readout_layer or layers[-1] + probabilities_behavior = ProbabilitiesMapping( + identifier=identifier, activations_model=TemporalIgnore(activations_model), layer=behavioral_readout_layer) + self.behavior_model = BehaviorArbiter({BrainModel.Task.label: logits_behavior, + BrainModel.Task.probabilities: probabilities_behavior}) + self.do_behavior = False + + self._visual_degrees = visual_degrees + + @property + def identifier(self): + return self._identifier + + def visual_degrees(self) -> int: + return self._visual_degrees + + def start_recording(self, recording_target, time_bins): + self.recording_layers = [layer for layer in self.layers if layer.startswith(recording_target)] + self.recording_time_bins = time_bins + + def start_task(self, task: BrainModel.Task, *args, **kwargs): + if task != BrainModel.Task.passive: + self.behavior_model.start_task(task, *args, **kwargs) + self.do_behavior = True + + def look_at(self, stimuli, number_of_trials=1): + if self.do_behavior: + return self.behavior_model.look_at(stimuli) + else: + # cache, since piecing times together is not too fast unfortunately + return self.look_at_cached(self.identifier, stimuli.identifier, stimuli) + + @store(identifier_ignore=['stimuli']) + def look_at_cached(self, model_identifier, 
stimuli_identifier, stimuli): + responses = self.activations_model(stimuli, layers=self.recording_layers) + # map time + regions = set(responses['region'].values) + if len(regions) > 1: + raise NotImplementedError("cannot handle more than one simultaneous region") + region = list(regions)[0] + time_bins = [self.time_mapping[region][timestep] if timestep in self.time_mapping[region] else (None, None) + for timestep in responses['time_step'].values] + responses['time_bin_start'] = 'time_step', [time_bin[0] for time_bin in time_bins] + responses['time_bin_end'] = 'time_step', [time_bin[1] for time_bin in time_bins] + responses = NeuroidAssembly(responses.rename({'time_step': 'time_bin'})) + responses = responses[{'time_bin': [not np.isnan(time_start) for time_start in responses['time_bin_start']]}] + # select time + time_responses = [] + for time_bin in tqdm(self.recording_time_bins, desc='CORnet-time to recording time'): + time_bin = time_bin if not isinstance(time_bin, np.ndarray) else time_bin.tolist() + time_bin_start, time_bin_end = time_bin + nearest_start = find_nearest(responses['time_bin_start'].values, time_bin_start) + bin_responses = responses.sel(time_bin_start=nearest_start) + bin_responses = NeuroidAssembly(bin_responses.values, coords={ + **{coord: (dims, values) for coord, dims, values in walk_coords(bin_responses) + if coord not in ['time_bin_level_0', 'time_bin_end']}, + **{'time_bin_start': ('time_bin', [time_bin_start]), + 'time_bin_end': ('time_bin', [time_bin_end])} + }, dims=bin_responses.dims) + time_responses.append(bin_responses) + responses = merge_data_arrays(time_responses) + return responses + + +def find_nearest(array, value): + array = np.asarray(array) + idx = (np.abs(array - value)).argmin() + return array[idx] + + +class TemporalIgnore: + """ + Wrapper around a activations model that squeezes out the temporal axis. + Useful when there is only one time step and the behavioral readout does not know what to do with time. + """ + + def __init__(self, temporal_activations_model): + self._activations_model = temporal_activations_model + + def __call__(self, *args, **kwargs): + activations = self._activations_model(*args, **kwargs) + activations = activations.squeeze('time_step') + return activations + + +class TemporalExtractor(ActivationsExtractorHelper): + # `from_paths` is the earliest method at which we can interject because calls below are stored and checked for the + # presence of all layers which, for CORnet, are passed as e.g. `IT.output-t0`. + # This code re-arranges the time component. 
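+    # For orientation (illustrative values, matching `get_layers` in model.py):
+    # raw activation layers arrive with names such as 'IT.output-t1' or
+    # 'decoder.avgpool-t0'. The regex below splits 'IT.output-t1' into
+    # region 'IT', stripped layer 'IT.output' and timestep '1'; activations are
+    # then regrouped so that the timestep becomes an explicit `time_step`
+    # dimension and the neuroid coordinates no longer carry the timestep suffix.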
+ def from_paths(self, *args, **kwargs): + raw_activations = super(TemporalExtractor, self).from_paths(*args, **kwargs) + # introduce time dimension + regions = defaultdict(list) + for layer in set(raw_activations['layer'].values): + match = re.match(r'(([^-]*)\..*|logits|avgpool)-t([0-9]+)', layer) + region, timestep = match.group(2) if match.group(2) else match.group(1), match.group(3) + stripped_layer = match.group(1) + regions[region].append((layer, stripped_layer, timestep)) + activations = {} + for region, time_layers in regions.items(): + for (full_layer, stripped_layer, timestep) in time_layers: + region_time_activations = raw_activations.sel(layer=full_layer) + region_time_activations['layer'] = 'neuroid', [stripped_layer] * len(region_time_activations['neuroid']) + activations[(region, timestep)] = region_time_activations + for key, key_activations in activations.items(): + region, timestep = key + key_activations['region'] = 'neuroid', [region] * len(key_activations['neuroid']) + activations[key] = NeuroidAssembly([key_activations.values], coords={ + **{coord: (dims, values) for coord, dims, values in walk_coords(activations[key]) + if coord != 'neuroid_id'}, # otherwise, neuroid dim will be as large as before with nans + **{'time_step': [int(timestep)]} + }, dims=['time_step'] + list(key_activations.dims)) + activations = list(activations.values()) + activations = merge_data_arrays(activations) + # rebuild neuroid_id without timestep + neuroid_id = [".".join([f"{value}" for value in values]) for values in zip(*[ + activations[coord].values for coord in ['model', 'region', 'neuroid_num']])] + activations['neuroid_id'] = 'neuroid', neuroid_id + return activations + + +def _build_time_mappings(time_mappings): + return {region: { + timestep: (time_start + timestep * time_step_size, + time_start + (timestep + 1) * time_step_size) + for timestep in range(0, timesteps)} + for region, (time_start, time_step_size, timesteps) in time_mappings.items()} diff --git a/brainscore_vision/models/cornet_s/model.py b/brainscore_vision/models/cornet_s/model.py new file mode 100644 index 000000000..9862e34ac --- /dev/null +++ b/brainscore_vision/models/cornet_s/model.py @@ -0,0 +1,77 @@ +import functools +import importlib +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from brainscore_vision.model_helpers.check_submission import check_models +import torch.hub +import ssl +from brainscore_vision.model_helpers.s3 import load_weight_file +from torch.nn import Module +from .helpers.helpers import TemporalPytorchWrapper + + +ssl._create_default_https_context = ssl._create_unverified_context + + +TIME_MAPPINGS = { + 'V1': (50, 100, 1), + 'V2': (70, 100, 2), + # 'V2': (20, 50, 2), # MS: This follows from the movshon anesthesized-monkey recordings, so might not hold up + 'V4': (90, 50, 4), + 'IT': (100, 100, 2), + } + + +def get_model(name): + assert name == 'CORnet-S' + + class Wrapper(Module): + def __init__(self, model): + super(Wrapper, self).__init__() + self.module = model + + mod = importlib.import_module(f'cornet.cornet_s') + model_ctr = getattr(mod, 'CORnet_S') + model = model_ctr() + model = Wrapper(model) # model was wrapped with DataParallel, so weights require `module.` prefix + weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models", + relative_path="cornet_s/cornet_s_epoch43.pth.tar", + version_id="4EAQnCqTy.2MCKiXTJ4l02iG8l3e.yfQ", + sha1="a4bfd8eda33b45fd945da1b972ab0b7cad38d60f") + checkpoint = torch.load(weights_path, 
map_location=lambda storage, loc: storage) # map onto cpu + model.load_state_dict(checkpoint['state_dict']) + model = model.module # unwrap + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = TemporalPytorchWrapper(identifier="CORnet-S", model=model, preprocessing=preprocessing, + separate_time=True) + wrapper.image_size = 224 + return wrapper + + +def get_layers(name): + assert name == 'CORnet-S' + return (['V1.output-t0'] + + [f'{area}.output-t{timestep}' + for area, timesteps in [('V2', range(2)), ('V4', range(4)), ('IT', range(2))] + for timestep in timesteps] + + ['decoder.avgpool-t0'] + ) + + +def get_bibtex(model_identifier): + return """@inproceedings{KubiliusSchrimpf2019CORnet, + abstract = {Deep convolutional artificial neural networks (ANNs) are the leading class of candidate models of the mechanisms of visual processing in the primate ventral stream. While initially inspired by brain anatomy, over the past years, these ANNs have evolved from a simple eight-layer architecture in AlexNet to extremely deep and branching architectures, demonstrating increasingly better object categorization performance, yet bringing into question how brain-like they still are. In particular, typical deep models from the machine learning community are often hard to map onto the brain's anatomy due to their vast number of layers and missing biologically-important connections, such as recurrence. Here we demonstrate that better anatomical alignment to the brain and high performance on machine learning as well as neuroscience measures do not have to be in contradiction. We developed CORnet-S, a shallow ANN with four anatomically mapped areas and recurrent connectivity, guided by Brain-Score, a new large-scale composite of neural and behavioral benchmarks for quantifying the functional fidelity of models of the primate ventral visual stream. Despite being significantly shallower than most models, CORnet-S is the top model on Brain-Score and outperforms similarly compact models on ImageNet. Moreover, our extensive analyses of CORnet-S circuitry variants reveal that recurrence is the main predictive factor of both Brain-Score and ImageNet top-1 performance. Finally, we report that the temporal evolution of the CORnet-S "IT" neural population resembles the actual monkey IT population dynamics. Taken together, these results establish CORnet-S, a compact, recurrent ANN, as the current best model of the primate ventral visual stream.}, + archivePrefix = {arXiv}, + arxivId = {1909.06161}, + author = {Kubilius, Jonas and Schrimpf, Martin and Hong, Ha and Majaj, Najib J. and Rajalingham, Rishi and Issa, Elias B. and Kar, Kohitij and Bashivan, Pouya and Prescott-Roy, Jonathan and Schmidt, Kailyn and Nayebi, Aran and Bear, Daniel and Yamins, Daniel L. K. and DiCarlo, James J.}, + booktitle = {Neural Information Processing Systems (NeurIPS)}, + editor = {Wallach, H. and Larochelle, H. and Beygelzimer, A. and D'Alch{\'{e}}-Buc, F. and Fox, E. 
and Garnett, R.}, + pages = {12785----12796}, + publisher = {Curran Associates, Inc.}, + title = {{Brain-Like Object Recognition with High-Performing Shallow Recurrent ANNs}}, + url = {http://papers.nips.cc/paper/9441-brain-like-object-recognition-with-high-performing-shallow-recurrent-anns}, + year = {2019} + }""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) \ No newline at end of file diff --git a/brainscore_vision/models/cornet_s/requirements.txt b/brainscore_vision/models/cornet_s/requirements.txt new file mode 100644 index 000000000..4efbdb4a5 --- /dev/null +++ b/brainscore_vision/models/cornet_s/requirements.txt @@ -0,0 +1,7 @@ +torchvision +torch +pandas +xarray +numpy +scipy +CORnet @ git+https://github.com/dicarlolab/CORnet.git diff --git a/brainscore_vision/models/cornet_s/test.py b/brainscore_vision/models/cornet_s/test.py new file mode 100644 index 000000000..1efdfe8cf --- /dev/null +++ b/brainscore_vision/models/cornet_s/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('CORnet-S') + assert model.identifier == 'CORnet-S' \ No newline at end of file From 6380fd364ffae03c6de577b050de742a7cf80451 Mon Sep 17 00:00:00 2001 From: Ben Lonnqvist Date: Tue, 2 Jul 2024 09:21:20 +0200 Subject: [PATCH 26/68] Fix a behavioral assertion about stimulus order (#971) * add a test to check for stimulus id order * add comment to the added method --- .../model_helpers/brain_transformation/behavior.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/brainscore_vision/model_helpers/brain_transformation/behavior.py b/brainscore_vision/model_helpers/brain_transformation/behavior.py index 28aeb9e6a..97cc62dcd 100644 --- a/brainscore_vision/model_helpers/brain_transformation/behavior.py +++ b/brainscore_vision/model_helpers/brain_transformation/behavior.py @@ -192,7 +192,7 @@ def start_task(self, task: BrainModel.Task, fitting_stimuli, number_of_trials=1, number_of_trials=number_of_trials, require_variance=require_variance) fitting_features = fitting_features.transpose('presentation', 'neuroid') - assert all(fitting_features['stimulus_id'].values == fitting_stimuli['stimulus_id'].values), \ + assert all(self.order_preserving_unique(fitting_features['stimulus_id'].values) == fitting_stimuli['stimulus_id'].values), \ "stimulus_id ordering is incorrect" self.classifier.fit(fitting_features, fitting_features['image_label']) @@ -241,6 +241,16 @@ def labels_to_indices(self, labels): index2label = OrderedDict((index, label) for label, index in label2index.items()) return indices, index2label + @staticmethod + def order_preserving_unique(array): + """ + This function sorts an array and removes duplicates while preserving the order of the elements. + This function is used in favor of np.unique to ensure that the order of the stimulus_ids is preserved, as + np.unique performs sorting on the array. + """ + _, indices = np.unique(array, return_index=True) + return array[np.sort(indices)] + class OddOneOut(BrainModel): def __init__(self, identifier: str, activations_model, layer: Union[str, List[str]]): From ddcb67b2e7ae56de1ef33a0c8439a4f54dcfa8fd Mon Sep 17 00:00:00 2001 From: Martin Schrimpf Date: Tue, 2 Jul 2024 11:41:59 +0200 Subject: [PATCH 27/68] fix interface specification (#438) * fix newline for pydoc formatting * fix time_bins parameter annotation each time bin consists of a tuple of two values (start, end), e.g. 
`time_bins=[(70, 170)]` --- brainscore_vision/model_interface.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/brainscore_vision/model_interface.py b/brainscore_vision/model_interface.py index bd07c5581..48622282d 100644 --- a/brainscore_vision/model_interface.py +++ b/brainscore_vision/model_interface.py @@ -132,7 +132,7 @@ class Task: Example: Setting up an odd-one-out task for a list of triplets with `start_task(BrainModel.Task.odd_one_out)` and calling - + .. code-block:: python look_at(['image1.png', 'image2.png', 'image3.png', #triplet 1 @@ -172,7 +172,7 @@ class RecordingTarget: V4 = 'V4' IT = 'IT' - def start_recording(self, recording_target: RecordingTarget, time_bins: List[Tuple[int]]) -> None: + def start_recording(self, recording_target: RecordingTarget, time_bins: List[Tuple[int, int]]) -> None: """ Instructs the model to begin recording in a specified :data:`~brainscore_vision.model_interface.BrainModel.RecordingTarget` and return the specified `time_bins`. From 53cda030db23b00422060ee9266f2ac7bdfe86ce Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Tue, 2 Jul 2024 09:24:48 -0400 Subject: [PATCH 28/68] Ep/add focalnet tiny lrf in1k (#945) * Add focalnet_tiny_in1k_submission * Remove get_model_list * Update requirements.txt --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> --- .../focalnet_tiny_in1k_submission/__init__.py | 5 ++ .../focalnet_tiny_in1k_submission/model.py | 62 +++++++++++++++++++ .../requirements.txt | 3 + .../focalnet_tiny_in1k_submission/test.py | 8 +++ 4 files changed, 78 insertions(+) create mode 100644 brainscore_vision/models/focalnet_tiny_in1k_submission/__init__.py create mode 100644 brainscore_vision/models/focalnet_tiny_in1k_submission/model.py create mode 100644 brainscore_vision/models/focalnet_tiny_in1k_submission/requirements.txt create mode 100644 brainscore_vision/models/focalnet_tiny_in1k_submission/test.py diff --git a/brainscore_vision/models/focalnet_tiny_in1k_submission/__init__.py b/brainscore_vision/models/focalnet_tiny_in1k_submission/__init__.py new file mode 100644 index 000000000..321ae9cc0 --- /dev/null +++ b/brainscore_vision/models/focalnet_tiny_in1k_submission/__init__.py @@ -0,0 +1,5 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['focalnet_tiny_lrf_in1k'] = lambda: ModelCommitment(identifier='focalnet_tiny_lrf_in1k', activations_model=get_model('focalnet_tiny_lrf_in1k'), layers=get_layers('focalnet_tiny_lrf_in1k')) diff --git a/brainscore_vision/models/focalnet_tiny_in1k_submission/model.py b/brainscore_vision/models/focalnet_tiny_in1k_submission/model.py new file mode 100644 index 000000000..c985f4dd0 --- /dev/null +++ b/brainscore_vision/models/focalnet_tiny_in1k_submission/model.py @@ -0,0 +1,62 @@ +from brainscore_vision.model_helpers.check_submission import check_models +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +import torchvision +import functools +import torch +import os +import timm + +dir_path = os.path.dirname(os.path.realpath(__file__)) + +""" +Template module for a base model submission to brain-score +""" + + +def get_model(name): + """ + This method fetches an instance of a base model. 
The instance has to be callable and return a xarray object, + containing activations. There exist standard wrapper implementations for common libraries, like pytorch and + keras. Checkout the examples folder, to see more. For custom implementations check out the implementation of the + wrappers. + :param name: the name of the model to fetch + :return: the model instance + """ + assert name == 'focalnet_tiny_lrf_in1k' + + model = timm.create_model('focalnet_tiny_lrf.ms_in1k', pretrained=True) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier='focalnet_tiny_lrf_in1k', model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + + +def get_layers(name): + assert name == 'focalnet_tiny_lrf_in1k' + """ + This method returns a list of string layer names to consider per model. The benchmarks maps brain regions to + layers and uses this list as a set of possible layers. The lists doesn't have to contain all layers, the less the + faster the benchmark process works. Additionally the given layers have to produce an activations vector of at least + size 25! The layer names are delivered back to the model instance and have to be resolved in there. For a pytorch + model, the layer name are for instance dot concatenated per module, e.g. "features.2". + :param name: the name of the model, to return the layers for + :return: a list of strings containing all layers, that should be considered as brain area. + """ + return ['layers.0.blocks.0', 'layers.0.blocks.1', 'layers.1.downsample', + 'layers.1.blocks.0', 'layers.1.blocks.1', 'layers.2.downsample', 'layers.2.blocks.0', + 'layers.2.blocks.1', 'layers.2.blocks.2', 'layers.2.blocks.3', 'layers.2.blocks.4', + 'layers.2.blocks.5', 'layers.3.downsample', 'layers.3.blocks.0', 'layers.3.blocks.1', 'norm','head.global_pool', 'head.fc'] + + +def get_bibtex(model_identifier): + """ + A method returning the bibtex reference of the requested model as a string. + """ + return '' + + +if __name__ == '__main__': + # Use this method to ensure the correctness of the BaseModel implementations. + # It executes a mock run of brain-score benchmarks. 
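+    # For a quick local sanity check, this module can be run directly, e.g.
+    # (module path assumed from this submission's location in the repository):
+    #   python -m brainscore_vision.models.focalnet_tiny_in1k_submission.model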
+ check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/focalnet_tiny_in1k_submission/requirements.txt b/brainscore_vision/models/focalnet_tiny_in1k_submission/requirements.txt new file mode 100644 index 000000000..219d778fb --- /dev/null +++ b/brainscore_vision/models/focalnet_tiny_in1k_submission/requirements.txt @@ -0,0 +1,3 @@ +timm +torch +torchvision diff --git a/brainscore_vision/models/focalnet_tiny_in1k_submission/test.py b/brainscore_vision/models/focalnet_tiny_in1k_submission/test.py new file mode 100644 index 000000000..c17d23d21 --- /dev/null +++ b/brainscore_vision/models/focalnet_tiny_in1k_submission/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('focalnet_tiny_lrf_in1k') + assert model.identifier == 'focalnet_tiny_lrf_in1k' \ No newline at end of file From 4d4488cf318f13bc5add626cbe591695437f8585 Mon Sep 17 00:00:00 2001 From: Martin Schrimpf Date: Tue, 2 Jul 2024 16:44:41 +0200 Subject: [PATCH 29/68] fix identifier metadata on Hebart2023 (#685) Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> --- brainscore_vision/benchmarks/hebart2023/benchmark.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/brainscore_vision/benchmarks/hebart2023/benchmark.py b/brainscore_vision/benchmarks/hebart2023/benchmark.py index d2551fe70..e72f38653 100644 --- a/brainscore_vision/benchmarks/hebart2023/benchmark.py +++ b/brainscore_vision/benchmarks/hebart2023/benchmark.py @@ -20,7 +20,7 @@ class Hebart2023Match(BenchmarkBase): - def __init__(self, similarity_measure='dot'): + def __init__(self): self._visual_degrees = VISUAL_DEGREES self._number_of_trials = 1 self._assembly = load_dataset('Hebart2023') @@ -29,9 +29,9 @@ def __init__(self, similarity_measure='dot'): # The noise ceiling was computed by averaging the percentage of participants # who made the same choice for a given triplet. See the paper for more detail. 
super().__init__( - identifier=f'Habart2023Match_{similarity_measure}', version=1, + identifier='Hebart2023-match', version=1, ceiling_func=lambda: Score(0.6767), - parent='Hebart2023', + parent='behavior_vision', bibtex=BIBTEX ) From 8137fe1aa6b4d4b50e4908be9e0ff7fafb8298a9 Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Tue, 2 Jul 2024 10:48:28 -0400 Subject: [PATCH 30/68] Add pnasnet_large (#996) * Add model files * Update model.py --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> --- .../models/pnasnet_large_pytorch/__init__.py | 7 +++ .../models/pnasnet_large_pytorch/model.py | 59 +++++++++++++++++++ .../pnasnet_large_pytorch/requirements.txt | 3 + .../models/pnasnet_large_pytorch/test.py | 8 +++ 4 files changed, 77 insertions(+) create mode 100644 brainscore_vision/models/pnasnet_large_pytorch/__init__.py create mode 100644 brainscore_vision/models/pnasnet_large_pytorch/model.py create mode 100644 brainscore_vision/models/pnasnet_large_pytorch/requirements.txt create mode 100644 brainscore_vision/models/pnasnet_large_pytorch/test.py diff --git a/brainscore_vision/models/pnasnet_large_pytorch/__init__.py b/brainscore_vision/models/pnasnet_large_pytorch/__init__.py new file mode 100644 index 000000000..68a05506f --- /dev/null +++ b/brainscore_vision/models/pnasnet_large_pytorch/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['pnasnet_large_pytorch'] = lambda: ModelCommitment(identifier='pnasnet_large_pytorch', + activations_model=get_model('pnasnet_large_pytorch'), + layers=get_layers('pnasnet_large_pytorch')) \ No newline at end of file diff --git a/brainscore_vision/models/pnasnet_large_pytorch/model.py b/brainscore_vision/models/pnasnet_large_pytorch/model.py new file mode 100644 index 000000000..402b75be8 --- /dev/null +++ b/brainscore_vision/models/pnasnet_large_pytorch/model.py @@ -0,0 +1,59 @@ +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.check_submission import check_models +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +import ssl +import functools +import timm + +ssl._create_default_https_context = ssl._create_unverified_context + +''' +This is a Pytorch implementation of pnasnet_large. + +Previously on Brain-Score, this model existed as a Tensorflow model, and was converted via: + https://huggingface.co/timm/pnasnet5large.tf_in1k + +Disclaimer: This (pytorch) implementation's Brain-Score scores might not align identically with Tensorflow +implementation. + +''' + +MODEL = timm.create_model('pnasnet5large.tf_in1k', pretrained=True) + +def get_model(name): + assert name == 'pnasnet_large_pytorch' + preprocessing = functools.partial(load_preprocess_images, image_size=331) + wrapper = PytorchWrapper(identifier='pnasnet_large_pytorch', model=MODEL, + preprocessing=preprocessing, + batch_size=4) # doesn't fit into 12 GB GPU memory otherwise + wrapper.image_size = 331 + return wrapper + + +def get_layers(name): + assert name == 'pnasnet_large_pytorch' + layer_names = [] + + for name, module in MODEL.named_modules(): + layer_names.append(name) + + return layer_names[2:] + + +def get_bibtex(model_identifier): + """ + A method returning the bibtex reference of the requested model as a string. 
+ """ + return """@misc{liu2018progressive, + title={Progressive Neural Architecture Search}, + author={Chenxi Liu and Barret Zoph and Maxim Neumann and Jonathon Shlens and Wei Hua and Li-Jia Li and Li Fei-Fei and Alan Yuille and Jonathan Huang and Kevin Murphy}, + year={2018}, + eprint={1712.00559}, + archivePrefix={arXiv}, + primaryClass={cs.CV} + } + """ + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/pnasnet_large_pytorch/requirements.txt b/brainscore_vision/models/pnasnet_large_pytorch/requirements.txt new file mode 100644 index 000000000..271376616 --- /dev/null +++ b/brainscore_vision/models/pnasnet_large_pytorch/requirements.txt @@ -0,0 +1,3 @@ +torchvision +torch +timm \ No newline at end of file diff --git a/brainscore_vision/models/pnasnet_large_pytorch/test.py b/brainscore_vision/models/pnasnet_large_pytorch/test.py new file mode 100644 index 000000000..72dc1a357 --- /dev/null +++ b/brainscore_vision/models/pnasnet_large_pytorch/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('pnasnet_large_pytorch') + assert model.identifier == 'pnasnet_large_pytorch' \ No newline at end of file From 67d0a53002024dd0c1a3d28c6f0e68b79db31f3a Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Tue, 2 Jul 2024 10:49:16 -0400 Subject: [PATCH 31/68] Ep/add densenet 201 pytorch (#950) * Add densenet_201_pytorch * Update model.py --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> --- .../models/densenet_201_pytorch/__init__.py | 7 +++ .../models/densenet_201_pytorch/model.py | 59 +++++++++++++++++++ .../densenet_201_pytorch/requirements.txt | 3 + .../models/densenet_201_pytorch/test.py | 8 +++ 4 files changed, 77 insertions(+) create mode 100644 brainscore_vision/models/densenet_201_pytorch/__init__.py create mode 100644 brainscore_vision/models/densenet_201_pytorch/model.py create mode 100644 brainscore_vision/models/densenet_201_pytorch/requirements.txt create mode 100644 brainscore_vision/models/densenet_201_pytorch/test.py diff --git a/brainscore_vision/models/densenet_201_pytorch/__init__.py b/brainscore_vision/models/densenet_201_pytorch/__init__.py new file mode 100644 index 000000000..317797713 --- /dev/null +++ b/brainscore_vision/models/densenet_201_pytorch/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['densenet_201_pytorch'] = lambda: ModelCommitment(identifier='densenet_201_pytorch', + activations_model=get_model('densenet_201_pytorch'), + layers=get_layers('densenet_201_pytorch')) \ No newline at end of file diff --git a/brainscore_vision/models/densenet_201_pytorch/model.py b/brainscore_vision/models/densenet_201_pytorch/model.py new file mode 100644 index 000000000..c845ad954 --- /dev/null +++ b/brainscore_vision/models/densenet_201_pytorch/model.py @@ -0,0 +1,59 @@ +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +import ssl +import functools +import timm +from brainscore_vision.model_helpers.check_submission import check_models + +ssl._create_default_https_context = ssl._create_unverified_context + +''' +This is a 
Pytorch implementation of densenet_201. + +Previously on Brain-Score, this model existed as a Tensorflow model, and was converted via: + https://huggingface.co/timm/densenet201.tv_in1k + +Disclaimer: This (pytorch) implementation's Brain-Score scores might not align identically with Tensorflow +implementation. + +''' + + +MODEL = timm.create_model('densenet201.tv_in1k', pretrained=True) + + +def get_model(name): + assert name == 'densenet_201_pytorch' + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier='densenet_201_pytorch', model=MODEL, + preprocessing=preprocessing, + batch_size=4) # doesn't fit into 12 GB GPU memory otherwise + wrapper.image_size = 224 + return wrapper + + +def get_layers(name): + assert name == 'densenet_201_pytorch' + layer_names = [] + + for name, module in MODEL.named_modules(): + layer_names.append(name) + + return layer_names[2:] + + +def get_bibtex(model_identifier): + """ + A method returning the bibtex reference of the requested model as a string. + """ + return """@inproceedings{huang2017densely, + title={Densely Connected Convolutional Networks}, + author={Huang, Gao and Liu, Zhuang and van der Maaten, Laurens and Weinberger, Kilian Q }, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + year={2017} + } + """ + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/densenet_201_pytorch/requirements.txt b/brainscore_vision/models/densenet_201_pytorch/requirements.txt new file mode 100644 index 000000000..271376616 --- /dev/null +++ b/brainscore_vision/models/densenet_201_pytorch/requirements.txt @@ -0,0 +1,3 @@ +torchvision +torch +timm \ No newline at end of file diff --git a/brainscore_vision/models/densenet_201_pytorch/test.py b/brainscore_vision/models/densenet_201_pytorch/test.py new file mode 100644 index 000000000..6a6deb0b5 --- /dev/null +++ b/brainscore_vision/models/densenet_201_pytorch/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('densenet_201_pytorch') + assert model.identifier == 'densenet_201_pytorch' \ No newline at end of file From 452ce233992eb76772bc0e37942d56932e0e9bee Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Tue, 2 Jul 2024 10:49:38 -0400 Subject: [PATCH 32/68] Ep/add inception v3 pytorch (#944) * Add inception_v3_pytorch * Update model.py * Update model.py --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> --- .../models/inception_v3_pytorch/__init__.py | 7 ++ .../models/inception_v3_pytorch/model.py | 68 +++++++++++++++++++ .../inception_v3_pytorch/requirements.txt | 3 + .../models/inception_v3_pytorch/test.py | 8 +++ 4 files changed, 86 insertions(+) create mode 100644 brainscore_vision/models/inception_v3_pytorch/__init__.py create mode 100644 brainscore_vision/models/inception_v3_pytorch/model.py create mode 100644 brainscore_vision/models/inception_v3_pytorch/requirements.txt create mode 100644 brainscore_vision/models/inception_v3_pytorch/test.py diff --git a/brainscore_vision/models/inception_v3_pytorch/__init__.py b/brainscore_vision/models/inception_v3_pytorch/__init__.py new file mode 100644 index 000000000..3b1a9dd4a --- /dev/null +++ b/brainscore_vision/models/inception_v3_pytorch/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision 
import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['inception_v3_pytorch'] = lambda: ModelCommitment(identifier='inception_v3_pytorch', + activations_model=get_model('inception_v3_pytorch'), + layers=get_layers('inception_v3_pytorch')) \ No newline at end of file diff --git a/brainscore_vision/models/inception_v3_pytorch/model.py b/brainscore_vision/models/inception_v3_pytorch/model.py new file mode 100644 index 000000000..834b6e4e9 --- /dev/null +++ b/brainscore_vision/models/inception_v3_pytorch/model.py @@ -0,0 +1,68 @@ +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +import ssl +import functools +import timm +from brainscore_vision.model_helpers.check_submission import check_models + +ssl._create_default_https_context = ssl._create_unverified_context + +''' +This is a Pytorch implementation of inception_v3. + +Previously on Brain-Score, this model existed as a Tensorflow model, and was converted via: + https://huggingface.co/docs/timm/en/models/inception-v3 + +Disclaimer: This (pytorch) implementation's Brain-Score scores might not align identically with Tensorflow +implementation. + +''' + +MODEL = timm.create_model('inception_v3', pretrained=True) + +def get_model(name): + assert name == 'inception_v3_pytorch' + preprocessing = functools.partial(load_preprocess_images, image_size=299) + wrapper = PytorchWrapper(identifier='inception_v3_pytorch', model=MODEL, + preprocessing=preprocessing, + batch_size=4) # doesn't fit into 12 GB GPU memory otherwise + wrapper.image_size = 299 + return wrapper + + +def get_layers(name): + assert name == 'inception_v3_pytorch' + layer_names = [] + + for name, module in MODEL.named_modules(): + layer_names.append(name) + + return layer_names[2:] + + +def get_bibtex(model_identifier): + """ + A method returning the bibtex reference of the requested model as a string. 
+ """ + return """@article{DBLP:journals/corr/SzegedyVISW15, + author = {Christian Szegedy and + Vincent Vanhoucke and + Sergey Ioffe and + Jonathon Shlens and + Zbigniew Wojna}, + title = {Rethinking the Inception Architecture for Computer Vision}, + journal = {CoRR}, + volume = {abs/1512.00567}, + year = {2015}, + url = {http://arxiv.org/abs/1512.00567}, + archivePrefix = {arXiv}, + eprint = {1512.00567}, + timestamp = {Mon, 13 Aug 2018 16:49:07 +0200}, + biburl = {https://dblp.org/rec/journals/corr/SzegedyVISW15.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} + } + """ + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/inception_v3_pytorch/requirements.txt b/brainscore_vision/models/inception_v3_pytorch/requirements.txt new file mode 100644 index 000000000..271376616 --- /dev/null +++ b/brainscore_vision/models/inception_v3_pytorch/requirements.txt @@ -0,0 +1,3 @@ +torchvision +torch +timm \ No newline at end of file diff --git a/brainscore_vision/models/inception_v3_pytorch/test.py b/brainscore_vision/models/inception_v3_pytorch/test.py new file mode 100644 index 000000000..c42e7ce9d --- /dev/null +++ b/brainscore_vision/models/inception_v3_pytorch/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('inception_v3_pytorch') + assert model.identifier == 'inception_v3_pytorch' \ No newline at end of file From 48237a8374e9372c77e1503ca0498b724db8d33b Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Wed, 3 Jul 2024 09:34:19 -0400 Subject: [PATCH 33/68] Ep/add effnetb1 cutmix augmix sam e1 5avg 424x377 (#948) * Add effnetb1_cutmix_augmix_sam_e1_5avg_424x377 * Update model.py * Update requirements.txt --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> --- .../__init__.py | 9 ++ .../model.py | 111 ++++++++++++++++++ .../requirements.txt | 6 + .../test.py | 8 ++ 4 files changed, 134 insertions(+) create mode 100644 brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/__init__.py create mode 100644 brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py create mode 100644 brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/requirements.txt create mode 100644 brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/test.py diff --git a/brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/__init__.py b/brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/__init__.py new file mode 100644 index 000000000..1a0c7215a --- /dev/null +++ b/brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/__init__.py @@ -0,0 +1,9 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['effnetb1_cutmix_augmix_sam_e1_5avg_424x377'] = lambda: ModelCommitment( + identifier='effnetb1_cutmix_augmix_sam_e1_5avg_424x377', + activations_model=get_model('effnetb1_cutmix_augmix_sam_e1_5avg_424x377'), + layers=get_layers('effnetb1_cutmix_augmix_sam_e1_5avg_424x377') +) \ No newline at end of file diff --git a/brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py b/brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py new file mode 100644 
index 000000000..310fa7b04 --- /dev/null +++ b/brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/model.py @@ -0,0 +1,111 @@ +import functools +import torch +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from PIL import Image +import numpy as np +import timm +import torch.nn as nn +from albumentations import ( + Compose, Normalize, Resize, CenterCrop +) +from albumentations.pytorch import ToTensorV2 +from brainscore_vision.model_helpers import load_weight_file +from brainscore_vision.model_helpers.check_submission import check_models + +image_resize = 424 +image_crop = 377 +norm_mean = [0.485, 0.456, 0.406] +norm_std = [0.229, 0.224, 0.225] + + +def custom_image_preprocess(images, **kwargs): + transforms_val = Compose([ + Resize(image_resize, image_resize), + CenterCrop(image_crop, image_crop), + Normalize(mean=norm_mean, std=norm_std, ), + ToTensorV2()]) + + images = [np.array(pillow_image) for pillow_image in images] + images = [transforms_val(image=image)["image"] for image in images] + images = np.stack(images) + + return images + + +def load_preprocess_images_custom(image_filepaths, preprocess_images=custom_image_preprocess, **kwargs): + images = [load_image(image_filepath) for image_filepath in image_filepaths] + images = preprocess_images(images, **kwargs) + return images + + +def load_image(image_filepath): + with Image.open(image_filepath) as pil_image: + if 'L' not in pil_image.mode.upper() and 'A' not in pil_image.mode.upper() \ + and 'P' not in pil_image.mode.upper(): # not binary and not alpha and not palletized + # work around to https://github.com/python-pillow/Pillow/issues/1144, + # see https://stackoverflow.com/a/30376272/2225200 + return pil_image.copy() + else: # make sure potential binary images are in RGB + rgb_image = Image.new("RGB", pil_image.size) + rgb_image.paste(pil_image) + return rgb_image + + +class EffNetBX(nn.Module): + def __init__(self, ): + super().__init__() + self.efnet_model = timm.create_model('tf_efficientnet_b1_ns', pretrained=True) + + def forward(self, x): + x = self.efnet_model(x) + return x + + +def get_model(name): + assert name == 'effnetb1_cutmix_augmix_sam_e1_5avg_424x377' + model_tf_efficientnet_b1_ns = EffNetBX() + + weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models", + relative_path="effnetb1_cutmix_augmix_sam_e1_5avg_424x377/weights1_5_avg.pth", + version_id="EqB6P7BittVdkgRd3oMncq_j9AAdiYvz", + sha1="871bd10e6ce164bfe8f3ce10bb77a69d326d7b65") + model_tf_efficientnet_b1_ns.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))["model"]) + model = model_tf_efficientnet_b1_ns.efnet_model + filter_elems = {"se", "act", "bn", "conv"} + preprocessing = functools.partial(load_preprocess_images_custom,preprocess_images=custom_image_preprocess) + wrapper = PytorchWrapper(identifier='my-model', model=model, preprocessing=preprocessing, batch_size=8) + wrapper.image_size = image_crop + return wrapper + + +def get_layers(name): + assert name == 'effnetb1_cutmix_augmix_sam_e1_5avg_424x377' + return ['blocks', 'blocks.0', 'blocks.0.0', 'blocks.0.1', + 'blocks.1', 'blocks.1.0', 'blocks.1.1', 'blocks.1.2', + 'blocks.2', 'blocks.2.0', 'blocks.2.1', 'blocks.2.2', + 'blocks.3', 'blocks.3.0', 'blocks.3.1', 'blocks.3.2', 'blocks.3.3', + 'blocks.4', 'blocks.4.0', 'blocks.4.1', 'blocks.4.2', 'blocks.4.3', + 'blocks.5', 'blocks.5.0', 'blocks.5.1', 'blocks.5.2', 'blocks.5.3', 'blocks.5.4', + 'blocks.6', 'blocks.6.0', 'blocks.6.1', 'global_pool', 
'global_pool.flatten', 'global_pool.pool'] + + +def get_bibtex(model_identifier): + return """@InProceedings{pmlr-v97-tan19a, + title = {{E}fficient{N}et: Rethinking Model Scaling for Convolutional Neural Networks}, + author = {Tan, Mingxing and Le, Quoc}, + booktitle = {Proceedings of the 36th International Conference on Machine Learning}, + pages = {6105--6114}, + year = {2019}, + editor = {Chaudhuri, Kamalika and Salakhutdinov, Ruslan}, + volume = {97}, + series = {Proceedings of Machine Learning Research}, + month = {09--15 Jun}, + publisher = {PMLR}, + pdf = {http://proceedings.mlr.press/v97/tan19a/tan19a.pdf}, + url = {https://proceedings.mlr.press/v97/tan19a.html}, + abstract = {Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are given. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on MobileNets and ResNet. To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves stateof-the-art 84.4% top-1 / 97.1% top-5 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet (Huang et al., 2018). Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flower (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters.} + }""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/requirements.txt b/brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/requirements.txt new file mode 100644 index 000000000..128d2bc2d --- /dev/null +++ b/brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/requirements.txt @@ -0,0 +1,6 @@ +torch +torchvision +numpy +timm +pillow +albumentations diff --git a/brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/test.py b/brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/test.py new file mode 100644 index 000000000..16c2bdbb8 --- /dev/null +++ b/brainscore_vision/models/effnetb1_cutmix_augmix_sam_e1_5avg_424x377/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('effnetb1_cutmix_augmix_sam_e1_5avg_424x377') + assert model.identifier == 'effnetb1_cutmix_augmix_sam_e1_5avg_424x377' \ No newline at end of file From 627beab09786e3bbafc6f3eb56604f75658d7b0d Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Wed, 3 Jul 2024 09:35:59 -0400 Subject: [PATCH 34/68] Ep/add effnetb1 cutmixpatch sam robust32 avge6e8e9e10 manylayers 324x288 (#947) * Add effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288 * Update requirements.txt * Update model.py * Move weights to S3 * Fix sha * Update requirements.txt --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k 
<95875723+deirdre-k@users.noreply.github.com> --- .../__init__.py | 5 + .../model.py | 142 ++++++++++++++++++ .../requirements.txt | 5 + .../test.py | 8 + 4 files changed, 160 insertions(+) create mode 100644 brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/__init__.py create mode 100644 brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py create mode 100644 brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/requirements.txt create mode 100644 brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/test.py diff --git a/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/__init__.py b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/__init__.py new file mode 100644 index 000000000..1d48d33d4 --- /dev/null +++ b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/__init__.py @@ -0,0 +1,5 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288'] = lambda: ModelCommitment(identifier='effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288', activations_model=get_model('effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288'), layers=get_layers('effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288')) diff --git a/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py new file mode 100644 index 000000000..41c6ca79e --- /dev/null +++ b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py @@ -0,0 +1,142 @@ +import functools + +import torch +from brainscore_vision.model_helpers.activations import PytorchWrapper, KerasWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from brainscore_vision.model_helpers.s3 import load_weight_file +from PIL import Image +import numpy as np +import timm +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform +import torch.nn as nn +from albumentations import ( + Compose, Normalize, Resize,CenterCrop + ) +from albumentations.pytorch import ToTensorV2 +# This is an example implementation for submitting alexnet as a pytorch model +# If you use pytorch, don't forget to add it to the setup.py + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. +# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. 
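+# (A clarifying note on the template comments above, which refer to the generic alexnet example: this module
+# instead wraps a timm EfficientNet-B1 ('tf_efficientnet_b1_ns') with custom weights loaded from S3, puts
+# selected BatchNorm layers into eval mode, and preprocesses images by resizing to 324 px and center-cropping
+# to 288 px; see get_model below.)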
+from brainscore_vision.model_helpers.check_submission import check_models + +import os + +image_resize = 324 +image_crop = 288 +norm_mean = [0.485, 0.456, 0.406] +norm_std = [0.229, 0.224, 0.225] +freeze_layers = ['blocks.0.0', 'blocks.0.1', 'blocks.1.0', + 'blocks.1.1', 'blocks.1.2', 'blocks.2.0', + 'blocks.2.1', 'blocks.2.2', 'blocks.3.0', 'blocks.3.1', 'blocks.3.2'] + +def custom_image_preprocess(images, **kwargs): + + transforms_val = Compose([ + Resize(image_resize, image_resize), + CenterCrop(image_crop, image_crop), + Normalize(mean=norm_mean,std=norm_std,), + ToTensorV2()]) + + images = [np.array(pillow_image) for pillow_image in images] + images = [transforms_val(image=image)["image"] for image in images] + images = np.stack(images) + + return images + +def load_preprocess_images_custom(image_filepaths, preprocess_images=custom_image_preprocess, **kwargs): + images = [load_image(image_filepath) for image_filepath in image_filepaths] + images = preprocess_images(images, **kwargs) + return images + +def load_image(image_filepath): + with Image.open(image_filepath) as pil_image: + if 'L' not in pil_image.mode.upper() and 'A' not in pil_image.mode.upper()\ + and 'P' not in pil_image.mode.upper(): # not binary and not alpha and not palletized + # work around to https://github.com/python-pillow/Pillow/issues/1144, + # see https://stackoverflow.com/a/30376272/2225200 + return pil_image.copy() + else: # make sure potential binary images are in RGB + rgb_image = Image.new("RGB", pil_image.size) + rgb_image.paste(pil_image) + return rgb_image + +class EffNetBX(nn.Module): + def __init__(self,): + super().__init__ () + self.efnet_model = timm.create_model('tf_efficientnet_b1_ns', pretrained=True) + + def forward(self, x): + x = self.efnet_model(x) + return x + +def get_model(name): + assert name == 'effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288' + model_tf_efficientnet_b1_ns= EffNetBX() + + weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models", + relative_path="effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/tf_efficientnet_b1_ns_robust_cutmixpatchresize_SAM_e6e8e9e10.pth", + version_id="prSgvyJFh_c7OKQODIEqU_c_hg_YXh5M", + sha1="9d60e49043b2d5354447c46cd011764cc6cf094e") + model_tf_efficientnet_b1_ns.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))["model"]) + model = model_tf_efficientnet_b1_ns.efnet_model + filter_elems = set(["se", "act", "bn", "conv"]) + layer_list = [layer for layer, _ in model.named_modules() if not any(i in layer for i in filter_elems)] + print(layer_list) + print(len(layer_list)) + + for n, m in model.named_modules(): + if isinstance(m, nn.BatchNorm2d) and any(x in n for x in ["conv_stem" ] + freeze_layers) or n =="bn1": + print(f"Freeze {n, m}") + m.eval() + + + preprocessing = functools.partial(load_preprocess_images_custom, + preprocess_images=custom_image_preprocess, + ) + + + wrapper = PytorchWrapper(identifier='my-model', model=model, preprocessing=preprocessing, batch_size=8) + + wrapper.image_size = image_crop + return wrapper + + +def get_layers(name): + assert name == 'effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288' + return ['blocks', 'blocks.0', 'blocks.0.0', 'blocks.0.1', + 'blocks.1', 'blocks.1.0', 'blocks.1.1', 'blocks.1.2', + 'blocks.2', 'blocks.2.0', 'blocks.2.1', 'blocks.2.2', + 'blocks.3', 'blocks.3.0', 'blocks.3.1', 'blocks.3.2', 'blocks.3.3', + 'blocks.4', 'blocks.4.0', + 'blocks.4.0.conv_pw', 'blocks.4.0.conv_dw', 
'blocks.4.0.conv_pwl', 'blocks.4.1', 'blocks.4.1.conv_pw', 'blocks.4.1.conv_dw', 'blocks.4.1.conv_pwl', 'blocks.4.2', + 'blocks.4.2.conv_pw', 'blocks.4.2.conv_dw', 'blocks.4.2.conv_pwl', 'blocks.4.3', 'blocks.4.3.conv_pw', 'blocks.4.3.conv_dw', 'blocks.4.3.conv_pwl', 'blocks.5', + 'blocks.5.0', 'blocks.5.0.conv_pw', 'blocks.5.0.conv_dw', 'blocks.5.0.conv_pwl', 'blocks.5.1', 'blocks.5.1.conv_pw', 'blocks.5.1.conv_dw', 'blocks.5.1.conv_pwl', + 'blocks.5.2', 'blocks.5.2.conv_pw', 'blocks.5.2.conv_dw', 'blocks.5.2.conv_pwl', 'blocks.5.3', 'blocks.5.3.conv_pw', 'blocks.5.3.conv_dw', 'blocks.5.3.conv_pwl', + 'blocks.5.4', 'blocks.5.4.conv_pw', 'blocks.5.4.conv_dw', 'blocks.5.4.conv_pwl', 'blocks.6', 'blocks.6.0', 'blocks.6.0.conv_pw', 'blocks.6.0.conv_dw', + 'blocks.6.0.conv_pwl', 'blocks.6.1', 'blocks.6.1.conv_pw', 'blocks.6.1.conv_dw', 'blocks.6.1.conv_pwl', + 'global_pool', 'global_pool.flatten', 'global_pool.pool'] + +def get_bibtex(model_identifier): + return """@InProceedings{pmlr-v97-tan19a, + title = {{E}fficient{N}et: Rethinking Model Scaling for Convolutional Neural Networks}, + author = {Tan, Mingxing and Le, Quoc}, + booktitle = {Proceedings of the 36th International Conference on Machine Learning}, + pages = {6105--6114}, + year = {2019}, + editor = {Chaudhuri, Kamalika and Salakhutdinov, Ruslan}, + volume = {97}, + series = {Proceedings of Machine Learning Research}, + month = {09--15 Jun}, + publisher = {PMLR}, + pdf = {http://proceedings.mlr.press/v97/tan19a/tan19a.pdf}, + url = {https://proceedings.mlr.press/v97/tan19a.html}, + abstract = {Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are given. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on MobileNets and ResNet. To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves stateof-the-art 84.4% top-1 / 97.1% top-5 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet (Huang et al., 2018). 
Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flower (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters.} + }""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/requirements.txt b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/requirements.txt new file mode 100644 index 000000000..df0c84353 --- /dev/null +++ b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/requirements.txt @@ -0,0 +1,5 @@ +torch +torchvision +pillow +albumentations +timm diff --git a/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/test.py b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/test.py new file mode 100644 index 000000000..e5b84658a --- /dev/null +++ b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288') + assert model.identifier == 'effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288' \ No newline at end of file From f4961eae9310ac721edc2fda0b7ad07b3d6f101f Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Wed, 3 Jul 2024 09:44:06 -0400 Subject: [PATCH 35/68] Ep/add effnetb1 272x240 (#949) * Add effnetb1_272x240 * Address version issues * Remove versions * Update model.py --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> --- .../models/effnetb1_272x240/__init__.py | 5 + .../models/effnetb1_272x240/model.py | 126 ++++++++++++++++++ .../models/effnetb1_272x240/requirements.txt | 3 + .../models/effnetb1_272x240/test.py | 9 ++ 4 files changed, 143 insertions(+) create mode 100644 brainscore_vision/models/effnetb1_272x240/__init__.py create mode 100644 brainscore_vision/models/effnetb1_272x240/model.py create mode 100644 brainscore_vision/models/effnetb1_272x240/requirements.txt create mode 100644 brainscore_vision/models/effnetb1_272x240/test.py diff --git a/brainscore_vision/models/effnetb1_272x240/__init__.py b/brainscore_vision/models/effnetb1_272x240/__init__.py new file mode 100644 index 000000000..b72d79fec --- /dev/null +++ b/brainscore_vision/models/effnetb1_272x240/__init__.py @@ -0,0 +1,5 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['effnetb1_272x240'] = lambda: ModelCommitment(identifier='effnetb1_272x240', activations_model=get_model('effnetb1_272x240'), layers=get_layers('effnetb1_272x240')) diff --git a/brainscore_vision/models/effnetb1_272x240/model.py b/brainscore_vision/models/effnetb1_272x240/model.py new file mode 100644 index 000000000..83f8c0dc1 --- /dev/null +++ b/brainscore_vision/models/effnetb1_272x240/model.py @@ -0,0 +1,126 @@ +import functools + +import torch +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from PIL import Image +import numpy as np 
+import timm +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform +import torch.nn as nn +from albumentations import ( + Compose, Normalize, Resize,CenterCrop + ) +from albumentations.pytorch import ToTensorV2 +# This is an example implementation for submitting alexnet as a pytorch model +# If you use pytorch, don't forget to add it to the setup.py + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. +# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. +from brainscore_vision.model_helpers.check_submission import check_models + +import ssl +ssl._create_default_https_context = ssl._create_unverified_context + +import os + +image_resize = 272 +image_crop = 240 +norm_mean = [0.485, 0.456, 0.406] +norm_std = [0.229, 0.224, 0.225] + +def custom_image_preprocess(images, **kwargs): + + transforms_val = Compose([ + Resize(image_resize, image_resize), + CenterCrop(image_crop, image_crop), + Normalize(mean=norm_mean,std=norm_std,), + ToTensorV2()]) + + images = [np.array(pillow_image) for pillow_image in images] + images = [transforms_val(image=image)["image"] for image in images] + images = np.stack(images) + + return images + +def load_preprocess_images_custom(image_filepaths, preprocess_images=custom_image_preprocess, **kwargs): + images = [load_image(image_filepath) for image_filepath in image_filepaths] + images = preprocess_images(images, **kwargs) + return images + +def load_image(image_filepath): + with Image.open(image_filepath) as pil_image: + if 'L' not in pil_image.mode.upper() and 'A' not in pil_image.mode.upper()\ + and 'P' not in pil_image.mode.upper(): # not binary and not alpha and not palletized + # work around to https://github.com/python-pillow/Pillow/issues/1144, + # see https://stackoverflow.com/a/30376272/2225200 + return pil_image.copy() + else: # make sure potential binary images are in RGB + rgb_image = Image.new("RGB", pil_image.size) + rgb_image.paste(pil_image) + return rgb_image + +class EffNetBX(nn.Module): + def __init__(self,): + super().__init__ () + self.efnet_model = timm.create_model('tf_efficientnet_b1_ns', pretrained=True) + + def forward(self, x): + x = self.efnet_model(x) + return x + +def get_model(name): + assert name == 'effnetb1_272x240' + model_tf_efficientnet_b1_ns= EffNetBX() + + model = model_tf_efficientnet_b1_ns.efnet_model + filter_elems = set(["se", "act", "bn", "conv"]) + layer_list = [layer for layer, _ in model.named_modules() if not any(i in layer for i in filter_elems)] + print(layer_list) + print(len(layer_list)) + + + preprocessing = functools.partial(load_preprocess_images_custom, + preprocess_images=custom_image_preprocess, + ) + + + wrapper = PytorchWrapper(identifier='my-model', model=model, preprocessing=preprocessing, batch_size=8) + + wrapper.image_size = image_crop + return wrapper + + +def get_layers(name): + assert name == 'effnetb1_272x240' + return ['blocks', 'blocks.0', 'blocks.0.0', 'blocks.0.1', + 'blocks.1', 'blocks.1.0', 'blocks.1.1', 'blocks.1.2', + 'blocks.2', 'blocks.2.0', 'blocks.2.1', 'blocks.2.2', + 'blocks.3', 'blocks.3.0', 'blocks.3.1', 'blocks.3.2', 'blocks.3.3', + 'blocks.4', 'blocks.4.0', 'blocks.4.1', 'blocks.4.2', 'blocks.4.3', + 'blocks.5', 'blocks.5.0', 'blocks.5.1', 'blocks.5.2', 'blocks.5.3', 'blocks.5.4', + 
'blocks.6', 'blocks.6.0', 'blocks.6.1', 'global_pool', 'global_pool.flatten', 'global_pool.pool'] + +def get_bibtex(model_identifier): + return """@InProceedings{pmlr-v97-tan19a, + title = {{E}fficient{N}et: Rethinking Model Scaling for Convolutional Neural Networks}, + author = {Tan, Mingxing and Le, Quoc}, + booktitle = {Proceedings of the 36th International Conference on Machine Learning}, + pages = {6105--6114}, + year = {2019}, + editor = {Chaudhuri, Kamalika and Salakhutdinov, Ruslan}, + volume = {97}, + series = {Proceedings of Machine Learning Research}, + month = {09--15 Jun}, + publisher = {PMLR}, + pdf = {http://proceedings.mlr.press/v97/tan19a/tan19a.pdf}, + url = {https://proceedings.mlr.press/v97/tan19a.html}, + abstract = {Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are given. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on MobileNets and ResNet. To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves stateof-the-art 84.4% top-1 / 97.1% top-5 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet (Huang et al., 2018). Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flower (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters.} + }""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/effnetb1_272x240/requirements.txt b/brainscore_vision/models/effnetb1_272x240/requirements.txt new file mode 100644 index 000000000..7c7c83d8e --- /dev/null +++ b/brainscore_vision/models/effnetb1_272x240/requirements.txt @@ -0,0 +1,3 @@ +albumentations +opencv-python-headless +timm diff --git a/brainscore_vision/models/effnetb1_272x240/test.py b/brainscore_vision/models/effnetb1_272x240/test.py new file mode 100644 index 000000000..fedcefdd2 --- /dev/null +++ b/brainscore_vision/models/effnetb1_272x240/test.py @@ -0,0 +1,9 @@ +# Left empty as part of 2023 models migration +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('effnetb1_272x240') + assert model.identifier == 'effnetb1_272x240' \ No newline at end of file From fa4c774643c696d23c53c96c0d4292af463f726b Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Wed, 3 Jul 2024 09:45:35 -0400 Subject: [PATCH 36/68] Add cvt_cvt-w24-384-in22k_finetuned-in1k_4 (#939) * Add cvt_cvt-w24-384-in22k_finetuned-in1k_4 * Fix imports * Update model.py * removing dash from package name * Pin transformers * Change PIL to pillow * Moving up transformers version * Update requirements.txt --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Co-authored-by: Deirdre Kelliher --- .../__init__.py | 9 ++ .../model.py | 134 
++++++++++++++++++ .../requirements.txt | 4 + .../test.py | 8 ++ 4 files changed, 155 insertions(+) create mode 100644 brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/__init__.py create mode 100644 brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/model.py create mode 100644 brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/requirements.txt create mode 100644 brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/test.py diff --git a/brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/__init__.py b/brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/__init__.py new file mode 100644 index 000000000..7059cbedf --- /dev/null +++ b/brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/__init__.py @@ -0,0 +1,9 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + + +model_registry['cvt_cvt-w24-384-in22k_finetuned-in1k_4'] = \ + lambda: ModelCommitment(identifier='cvt_cvt-w24-384-in22k_finetuned-in1k_4', + activations_model=get_model('cvt_cvt-w24-384-in22k_finetuned-in1k_4'), + layers=get_layers('cvt_cvt-w24-384-in22k_finetuned-in1k_4')) \ No newline at end of file diff --git a/brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/model.py b/brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/model.py new file mode 100644 index 000000000..1b6b87552 --- /dev/null +++ b/brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/model.py @@ -0,0 +1,134 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +from transformers import AutoFeatureExtractor, CvtForImageClassification +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from PIL import Image +import numpy as np +import torch + +""" +Template module for a base model submission to brain-score +""" + + +def get_model(name): + assert name == 'cvt_cvt-w24-384-in22k_finetuned-in1k_4' + # https://huggingface.co/models?sort=downloads&search=cvt + image_size = 384 + processor = AutoFeatureExtractor.from_pretrained('microsoft/cvt-w24-384-22k') + model = CvtForImageClassification.from_pretrained('microsoft/cvt-w24-384-22k') + preprocessing = functools.partial(load_preprocess_images, processor=processor, image_size=image_size) + wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing) + wrapper.image_size = image_size + + return wrapper + + +def get_layers(name): + assert name == 'cvt_cvt-w24-384-in22k_finetuned-in1k_4' + layers = [] + layers += [f'cvt.encoder.stages.0.layers.{i}' for i in range(2)] + layers += [f'cvt.encoder.stages.1.layers.{i}' for i in range(2)] + layers += [f'cvt.encoder.stages.2.layers.{i}' for i in range(20)] + layers += ['layernorm'] + return layers + + +def get_bibtex(model_identifier): + """ + A method returning the bibtex reference of the requested model as a string. + """ + return '' + + +def load_preprocess_images(image_filepaths, image_size, processor=None, **kwargs): + images = load_images(image_filepaths) + # images = [, ...] 
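+    # at this point `images` is a list of PIL.Image objects; resize each to the square model input size
+    # before handing them to the Hugging Face feature extractor (or to the torchvision fallback below)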
+ images = [image.resize((image_size, image_size)) for image in images] + if processor is not None: + images = [processor(images=image, return_tensors="pt", **kwargs) for image in images] + if len(images[0].keys()) != 1: + raise NotImplementedError(f'unknown processor for getting model {processor}') + assert list(images[0].keys())[0] == 'pixel_values' + images = [image['pixel_values'] for image in images] + images = torch.cat(images) + images = images.cpu().numpy() + else: + images = preprocess_images(images, image_size=image_size, **kwargs) + return images + + +def load_images(image_filepaths): + return [load_image(image_filepath) for image_filepath in image_filepaths] + + +def load_image(image_filepath): + with Image.open(image_filepath) as pil_image: + if 'L' not in pil_image.mode.upper() and 'A' not in pil_image.mode.upper() \ + and 'P' not in pil_image.mode.upper(): # not binary and not alpha and not palletized + # work around to https://github.com/python-pillow/Pillow/issues/1144, + # see https://stackoverflow.com/a/30376272/2225200 + return pil_image.copy() + else: # make sure potential binary images are in RGB + rgb_image = Image.new("RGB", pil_image.size) + rgb_image.paste(pil_image) + return rgb_image + + +def preprocess_images(images, image_size, **kwargs): + preprocess = torchvision_preprocess_input(image_size, **kwargs) + images = [preprocess(image) for image in images] + images = np.concatenate(images) + return images + + +def torchvision_preprocess_input(image_size, **kwargs): + from torchvision import transforms + return transforms.Compose([ + transforms.Resize((image_size, image_size)), + torchvision_preprocess(**kwargs), + ]) + + +def torchvision_preprocess(normalize_mean=(0.485, 0.456, 0.406), normalize_std=(0.229, 0.224, 0.225)): + from torchvision import transforms + return transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=normalize_mean, std=normalize_std), + lambda img: img.unsqueeze(0) + ]) + + +def create_static_video(image, num_frames, normalize_0to1=False, channel_dim=3): + ''' + Create a static video with the same image in all frames. + Args: + image (PIL.Image.Image): Input image. + num_frames (int): Number of frames in the video. + Returns: + result (np.ndarray): np array of frames of shape (num_frames, height, width, 3). + ''' + frames = [] + for _ in range(num_frames): + frame = np.array(image) + if normalize_0to1: + frame = frame / 255. + if channel_dim == 1: + frame = frame.transpose(2, 0, 1) + frames.append(frame) + return np.stack(frames) + + +if __name__ == '__main__': + # Use this method to ensure the correctness of the BaseModel implementations. + # It executes a mock run of brain-score benchmarks. 
+ check_models.check_base_models(__name__) + +""" +Notes on the error: + +- 'channel_x' key error: +# 'embeddings.patch_embeddings.projection', +https://github.com/search?q=repo%3Abrain-score%2Fmodel-tools%20channel_x&type=code + +""" diff --git a/brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/requirements.txt b/brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/requirements.txt new file mode 100644 index 000000000..b71f00d65 --- /dev/null +++ b/brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/requirements.txt @@ -0,0 +1,4 @@ +numpy +torch +transformers==4.30.2 +pillow diff --git a/brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/test.py b/brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/test.py new file mode 100644 index 000000000..c48b4a7e7 --- /dev/null +++ b/brainscore_vision/models/cvt_cvt_w24_384_in22k_finetuned_in1k_4/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('cvt_cvt-w24-384-in22k_finetuned-in1k_4') + assert model.identifier == 'cvt_cvt-w24-384-in22k_finetuned-in1k_4' \ No newline at end of file From 29fa9ae1113d9b48336042c11176cceed27fb904 Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Wed, 3 Jul 2024 13:35:31 -0400 Subject: [PATCH 37/68] Add model for resnext101_32x8d_wsl (#938) * Add model files * Address comments --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> --- .../models/resnext101_32x8d_wsl/__init__.py | 4 +-- .../models/resnext101_32x8d_wsl/model.py | 29 +++++++++---------- .../models/resnext101_32x8d_wsl/test.py | 2 +- 3 files changed, 16 insertions(+), 19 deletions(-) diff --git a/brainscore_vision/models/resnext101_32x8d_wsl/__init__.py b/brainscore_vision/models/resnext101_32x8d_wsl/__init__.py index 6cb3b294d..14cca1a00 100644 --- a/brainscore_vision/models/resnext101_32x8d_wsl/__init__.py +++ b/brainscore_vision/models/resnext101_32x8d_wsl/__init__.py @@ -3,5 +3,5 @@ from .model import get_model, get_layers model_registry['resnext101_32x8d_wsl'] = lambda: ModelCommitment(identifier='resnext101_32x8d_wsl', - activations_model=get_model('resnext101_32x8d_wsl'), - layers=get_layers('resnext101_32x8d_wsl')) \ No newline at end of file + activations_model=get_model(), + layers=get_layers()) \ No newline at end of file diff --git a/brainscore_vision/models/resnext101_32x8d_wsl/model.py b/brainscore_vision/models/resnext101_32x8d_wsl/model.py index 18e89e7a9..3c4fef444 100644 --- a/brainscore_vision/models/resnext101_32x8d_wsl/model.py +++ b/brainscore_vision/models/resnext101_32x8d_wsl/model.py @@ -9,8 +9,7 @@ ssl._create_default_https_context = ssl._create_unverified_context -def get_model(name): - assert name == 'resnext101_32x8d_wsl' +def get_model(): model_identifier = "resnext101_32x8d_wsl" model = torch.hub.load('facebookresearch/WSL-Images', model_identifier) preprocessing = functools.partial(load_preprocess_images, image_size=224) @@ -21,8 +20,7 @@ def get_model(name): return wrapper -def get_layers(name): - assert name == 'resnext101_32x8d_wsl' +def get_layers(): return (['conv1'] + # note that while relu is used multiple times, by default the last one will overwrite all previous ones [f"layer{block + 1}.{unit}.relu" @@ -31,17 +29,16 @@ def get_layers(name): def get_bibtex(model_identifier): - """ - A method returning the bibtex reference of the requested 
model as a string. - """ - return """@inproceedings{mahajan2018exploring, - title={Exploring the limits of weakly supervised pretraining}, - author={Mahajan, Dhruv and Girshick, Ross and Ramanathan, Vignesh and He, Kaiming and Paluri, Manohar and Li, Yixuan and Bharambe, Ashwin and Van Der Maaten, Laurens}, - booktitle={Proceedings of the European conference on computer vision (ECCV)}, - pages={181--196}, - year={2018} - }""" - + """ + A method returning the bibtex reference of the requested model as a string. + """ + return """@inproceedings{mahajan2018exploring, + title={Exploring the limits of weakly supervised pretraining}, + author={Mahajan, Dhruv and Girshick, Ross and Ramanathan, Vignesh and He, Kaiming and Paluri, Manohar and Li, Yixuan and Bharambe, Ashwin and Van Der Maaten, Laurens}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={181--196}, + year={2018} + }""" if __name__ == '__main__': - check_models.check_base_models(__name__) \ No newline at end of file + check_models.check_base_models(__name__) \ No newline at end of file diff --git a/brainscore_vision/models/resnext101_32x8d_wsl/test.py b/brainscore_vision/models/resnext101_32x8d_wsl/test.py index a79c78097..25af51b92 100644 --- a/brainscore_vision/models/resnext101_32x8d_wsl/test.py +++ b/brainscore_vision/models/resnext101_32x8d_wsl/test.py @@ -5,4 +5,4 @@ @pytest.mark.travis_slow def test_has_identifier(): model = brainscore_vision.load_model('resnext101_32x8d_wsl') - assert model.identifier == 'resnext101_32x8d_wsl' \ No newline at end of file + assert model.identifier == 'resnext101_32x8d_wsl' From 64ffcda5f0e615e606e9b9d274d0d6e6a03aff6e Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Wed, 3 Jul 2024 13:36:09 -0400 Subject: [PATCH 38/68] Add resnet-50-robust (#957) * Add resnet-50-robust * removing dash from package name * Remove get_model_list, update weight version_id * Update requirements.txt * Re-trigger ReadtheDocs * Re-trigger ReadtheDocs --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Co-authored-by: Deirdre Kelliher --- .../models/resnet_50_robust/__init__.py | 7 +++ .../models/resnet_50_robust/model.py | 55 +++++++++++++++++++ .../models/resnet_50_robust/requirements.txt | 3 + .../models/resnet_50_robust/test.py | 8 +++ 4 files changed, 73 insertions(+) create mode 100644 brainscore_vision/models/resnet_50_robust/__init__.py create mode 100644 brainscore_vision/models/resnet_50_robust/model.py create mode 100644 brainscore_vision/models/resnet_50_robust/requirements.txt create mode 100644 brainscore_vision/models/resnet_50_robust/test.py diff --git a/brainscore_vision/models/resnet_50_robust/__init__.py b/brainscore_vision/models/resnet_50_robust/__init__.py new file mode 100644 index 000000000..0946d7f4e --- /dev/null +++ b/brainscore_vision/models/resnet_50_robust/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['resnet-50-robust'] = lambda: ModelCommitment(identifier='resnet-50-robust', + activations_model=get_model('resnet-50-robust'), + layers=get_layers('resnet-50-robust')) diff --git a/brainscore_vision/models/resnet_50_robust/model.py b/brainscore_vision/models/resnet_50_robust/model.py new file mode 100644 index 000000000..83b17d049 --- /dev/null +++ 
b/brainscore_vision/models/resnet_50_robust/model.py @@ -0,0 +1,55 @@ +import functools +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +import torch +from importlib import import_module +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from brainscore_vision.model_helpers.check_submission import check_models +import ssl +from brainscore_vision.model_helpers.s3 import load_weight_file + +ssl._create_default_https_context = ssl._create_unverified_context + + +def get_model(name): + assert name == 'resnet-50-robust' + module = import_module(f'torchvision.models') + model_ctr = getattr(module, 'resnet50') + model = model_ctr() + preprocessing = functools.partial(load_preprocess_images, image_size=224) + weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models", + relative_path="resnet-50-robust/ImageNet.pt", + version_id=".shHB0L_L9L3Mtco0Kf4EBP3Xj9nLKnC", + sha1="cc6e4441abc8ad6d2f4da5db84836e544bfb53fd") + checkpoint = torch.load(weights_path, map_location=torch.device('cpu')) + + # process weights -- remove the attacker and prepocessing weights + weights = checkpoint['model'] + weights = {k[len('module.model.'):]: v for k, v in weights.items() if 'attacker' not in k} + weights = {k: weights[k] for k in list(weights.keys())[2:]} + model.load_state_dict(weights) + # wrap model with pytorch wrapper + wrapper = PytorchWrapper(identifier='resnet50', model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + + +def get_layers(name): + assert name == 'resnet-50-robust' + layers = ( + ['conv1'] + + ['layer1.0.conv3', 'layer1.1.conv3', 'layer1.2.conv3'] + + ['layer2.0.downsample.0', 'layer2.1.conv3', 'layer2.2.conv3', 'layer2.3.conv3'] + + ['layer3.0.downsample.0', 'layer3.1.conv3', 'layer3.2.conv3', 'layer3.3.conv3', + 'layer3.4.conv3', 'layer3.5.conv3'] + + ['layer4.0.downsample.0', 'layer4.1.conv3', 'layer4.2.conv3'] + + ['avgpool'] + ) + return layers + + +def get_bibtex(model_identifier): + return """""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/resnet_50_robust/requirements.txt b/brainscore_vision/models/resnet_50_robust/requirements.txt new file mode 100644 index 000000000..7c275e68d --- /dev/null +++ b/brainscore_vision/models/resnet_50_robust/requirements.txt @@ -0,0 +1,3 @@ +torchvision +torch +importlib diff --git a/brainscore_vision/models/resnet_50_robust/test.py b/brainscore_vision/models/resnet_50_robust/test.py new file mode 100644 index 000000000..25edc8f74 --- /dev/null +++ b/brainscore_vision/models/resnet_50_robust/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('resnet-50-robust') + assert model.identifier == 'resnet-50-robust' \ No newline at end of file From 9d963f1a6daea4794c4c07faee86b17b9ae10f0f Mon Sep 17 00:00:00 2001 From: Ben Lonnqvist Date: Sat, 6 Jul 2024 09:18:26 +0200 Subject: [PATCH 39/68] Malania2007 benchmark (#365) * Initial Malania 2007 commit with preliminary threshold metric, the benchmark, packaging and a few tests. 
* Add correct visual angle * clean up threshold metric file * Further threshold cleanup and ceiling compute verification * Remove python 3.8 requirement that came from typing.Literal * clean up benchmark file * add assembly, stimulus, and benchmark general tests * resolve Islam2021 conflict * Add grid search, correct psychometric function, and pooled score * Standardized vernier train and test set sizes * add goodness of fit check to discard fits to random responses * update ceilings to pooled expectations * packaged commit * add Malania2007 entries to lookup.csv * add Malania2007 to benchmark init * fix indexing error in test___init__ * add missing year(s) in identifiers where they were missing * add S3-related tests to private access, as Travis throws a NoCredentialsError on trying to access them * minor cleanup of threshold * add a few comments to threshold functions * fix typo in precomputed feature test * fix typos in dataset and assembly call functions * Update Assembly type and name * update scoring method with comment and update ceilings * update benchmark to 2.0 format w/ local test * update data registry format * remove BIBTEX from threshold __init__ * update metric imports to 2.0 * re-add test_stimuli.py deleted by accident * change nan filtering to subject_unique_id filtering * add require_variance to model call * update stimulus and data assembly related information with arcane knowledge collected from years of hard work * more arcane knowledge, it's endless * move packaging files * add packaging files * modify packaging paths * remove redundant files * remove redundant import * fix stimulus set / assembly link * fix stimulus set / assembly indexing * add image label to threshold elevation calculation * change stimulus numbers in the test to be what they should be * add calls to require_variance * fix test errors * fix bug with ceiling access * correct test with incorrect nan droppign * fix wrong number of stimuli in the vernier only test * add comment to explain the logic behind the scoring function * remove redundant comment * remove pool score * add metric tests * fix benchmark filtering issue and recompute ceilings to accommodate removal of pool_score * fix superfluous test by switching it for another * add vernier acuity benchmark * update vernier acuity benchmark, ceilings, and mean ceiling * clean up benchmark file * fix a few bugs with loading benchmarks and such * fix some happy little bugs * add alexnet test * fix dataset argument error * Apply suggestions from code review Co-authored-by: Martin Schrimpf * add renaming suggestions * fix naming issues * remove out-dated aggregation dim in test * fix identifier testing --------- Co-authored-by: Martin Schrimpf --- .../benchmarks/malania2007/__init__.py | 13 + .../benchmarks/malania2007/benchmark.py | 224 ++++++++ .../benchmarks/malania2007/test.py | 63 +++ .../data/malania2007/__init__.py | 254 +++++++++ .../data_packaging/malania_data_assembly.py | 79 +++ .../data_packaging/malania_stimulus_set.py | 79 +++ brainscore_vision/data/malania2007/test.py | 147 ++++++ .../metrics/threshold/__init__.py | 5 + brainscore_vision/metrics/threshold/metric.py | 482 ++++++++++++++++++ brainscore_vision/metrics/threshold/test.py | 71 +++ 10 files changed, 1417 insertions(+) create mode 100644 brainscore_vision/benchmarks/malania2007/__init__.py create mode 100644 brainscore_vision/benchmarks/malania2007/benchmark.py create mode 100644 brainscore_vision/benchmarks/malania2007/test.py create mode 100644 
brainscore_vision/data/malania2007/__init__.py create mode 100644 brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py create mode 100644 brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py create mode 100644 brainscore_vision/data/malania2007/test.py create mode 100644 brainscore_vision/metrics/threshold/__init__.py create mode 100644 brainscore_vision/metrics/threshold/metric.py create mode 100644 brainscore_vision/metrics/threshold/test.py diff --git a/brainscore_vision/benchmarks/malania2007/__init__.py b/brainscore_vision/benchmarks/malania2007/__init__.py new file mode 100644 index 000000000..7c6a3bbd1 --- /dev/null +++ b/brainscore_vision/benchmarks/malania2007/__init__.py @@ -0,0 +1,13 @@ +from brainscore_vision import benchmark_registry +from . import benchmark + +benchmark_registry['Malania2007.short2-threshold_elevation'] = lambda: benchmark._Malania2007Base('short2') +benchmark_registry['Malania2007.short4-threshold_elevation'] = lambda: benchmark._Malania2007Base('short4') +benchmark_registry['Malania2007.short6-threshold_elevation'] = lambda: benchmark._Malania2007Base('short6') +benchmark_registry['Malania2007.short8-threshold_elevation'] = lambda: benchmark._Malania2007Base('short8') +benchmark_registry['Malania2007.short16-threshold_elevation'] = lambda: benchmark._Malania2007Base('short16') +benchmark_registry['Malania2007.equal2-threshold_elevation'] = lambda: benchmark._Malania2007Base('equal2') +benchmark_registry['Malania2007.long2-threshold_elevation'] = lambda: benchmark._Malania2007Base('long2') +benchmark_registry['Malania2007.equal16-threshold_elevation'] = lambda: benchmark._Malania2007Base('equal16') +benchmark_registry['Malania2007.long16-threshold_elevation'] = lambda: benchmark._Malania2007Base('long16') +benchmark_registry['Malania2007.vernieracuity-threshold'] = lambda: benchmark._Malania2007VernierAcuity() diff --git a/brainscore_vision/benchmarks/malania2007/benchmark.py b/brainscore_vision/benchmarks/malania2007/benchmark.py new file mode 100644 index 000000000..7ad587b4d --- /dev/null +++ b/brainscore_vision/benchmarks/malania2007/benchmark.py @@ -0,0 +1,224 @@ +from typing import Tuple +import numpy as np + +import brainscore_vision +from brainio.assemblies import PropertyAssembly +from brainscore_vision.benchmarks import BenchmarkBase +from brainscore_vision.benchmark_helpers.screen import place_on_screen +from brainscore_vision import load_metric +from brainscore_vision.model_interface import BrainModel +from brainscore_vision.utils import LazyLoad +from brainscore_core.metrics import Score + + +BIBTEX = """@article{malania2007, + author = {Malania, Maka and Herzog, Michael H. 
and Westheimer, Gerald}, + title = "{Grouping of contextual elements that affect vernier thresholds}", + journal = {Journal of Vision}, + volume = {7}, + number = {2}, + pages = {1-1}, + year = {2007}, + issn = {1534-7362}, + doi = {10.1167/7.2.1}, + url = {https://doi.org/10.1167/7.2.1} + }""" + +BASELINE_CONDITION = 'vernier_only' +DATASETS = ['short2-threshold_elevation', 'short4-threshold_elevation', 'short6-threshold_elevation', + 'short8-threshold_elevation', 'short16-threshold_elevation', 'equal2-threshold_elevation', + 'long2-threshold_elevation', 'equal16-threshold_elevation', 'long16-threshold_elevation', + 'vernieracuity-threshold'] +# Values in NUM_FLANKERS_PER_CONDITION denote the condition (i.e., in this case the number of flankers) to be selected +# This is kept track of simply because the benchmark uses threshold elevation - i.e., a comparison of 2 conditions +NUM_FLANKERS_PER_CONDITION = {'short2': 2, 'short4': 4, 'short6': 6, 'short8': 8, + 'short16': 16, 'equal2': 2, 'long2': 2, 'equal16': 16, + 'long16': 16, 'vernier_only': 0} + + +class _Malania2007Base(BenchmarkBase): + """ + INFORMATION: + + Benchmark DATASETS should be considered as independent. This means that participant-specific across-condition data + should only ever be compared using the 'subject_unique_id'. In some conditions (short-2, vernier_only, short-16) + an additional observer was added from the original paper's plots. This is because in these conditions, two + experiments were independently conducted, and 1 additional observer that was non-overlapping between the + experiments was added to the aggregate benchmark. + + While humans and models are performing the same testing task in this benchmark, there are a number of choices + that are made in this benchmark that make minor deviations from the human experiment. The choices that make + deviations from the human experiment are listed below alongside the reason for why the departure was made, + and what the 'precisely faithful' alternative would be. + + Benchmark Choices: + + 1) The number and type of fitting stimuli are unfounded choices. Currently, the number of fitting stimuli is chosen + to be relatively large, and hopefully sufficient for decoding in the baseline condition in general. + - Precisely faithful alternative: Present text instructions to models as they were presented to humans + * Why not this alternative? Since the experiment is about early visual perception, and there are currently + few/no models capable of a task like this, it would not be interesting. + - Somewhat faithful alternative: Present a smaller number of training stimuli, motivated by work like + Lee & DiCarlo (2023), biorXiv (doi:https://doi.org/10.1101/2022.12.31.522402). + * Why not this alternative? Since the experiment is not about perceptual learning but about early visual + perception, and there are few/no models capable of a task like this, it would not be interesting. + - Importantly, this means the benchmark examines the models' capability to support a task like this, rather than + their capability to learn a task like this. + 2) In the human experiment, stimuli were presented at exactly the foveal position. In the model experiment, + testing stimuli are presented at exactly the foveal position +- 72arcsec = 0.02deg. + * Why this alternative? Since most models evaluated are test-time deterministic, we want a more precise + estimate of the threshold than a point estimate. 
Since human microsaccades of small distances are generally + uncontrolled and uncontrollable for (e.g., up to 360arcsec = 6arcmin = 0.1 deg), we believe the tiny jitter + of 0.02deg to have no impact at all on the comparison under study, while improving the precision of threshold + estimates. + + """ + def __init__(self, condition: str): + self.baseline_condition = BASELINE_CONDITION + self.condition = condition + + # since this benchmark compares threshold elevation against a baseline, we omit one subject + # in some conditions in which that subject did not perform both the baseline and the test + # condition + baseline_assembly = LazyLoad(lambda: load_assembly(self.baseline_condition)) + condition_assembly = LazyLoad(lambda: load_assembly(self.condition)) + self._assembly, self._baseline_assembly = filter_baseline_subjects(condition_assembly, + baseline_assembly) + + self._assemblies = {'baseline_assembly': self._baseline_assembly, + 'condition_assembly': self._assembly} + self._stimulus_set = brainscore_vision.load_stimulus_set(f'Malania2007.{self.condition}'.rstrip('-threshold_elevation')) + self._baseline_stimulus_set = brainscore_vision.load_stimulus_set(f'Malania2007.{self.baseline_condition}'.rstrip('-threshold_elevation')) + self._stimulus_sets = {self.condition: self._stimulus_set, + self.baseline_condition: self._baseline_stimulus_set} + self._fitting_stimuli = brainscore_vision.load_stimulus_set(f'Malania2007.{self.condition}'.rstrip('-threshold_elevation') + '_fit') + + self._metric = load_metric('threshold_elevation', + independent_variable='image_label', + baseline_condition=self.baseline_condition, + test_condition=self.condition, + threshold_accuracy=0.75) + + self._visual_degrees = 2.986667 + self._number_of_trials = 10 # arbitrary choice for microsaccades to improve precision of estimates + + super(_Malania2007Base, self).__init__( + identifier=f'Malania2007.{condition}', version=1, + ceiling_func=lambda: self._metric.ceiling(self._assemblies), + parent='Malania2007', + bibtex=BIBTEX) + + def __call__(self, candidate: BrainModel): + model_responses = {} + candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli=self._fitting_stimuli, + number_of_trials=2, require_variance=True) + for condition in (self.baseline_condition, self.condition): + stimulus_set = place_on_screen( + self._stimulus_sets[condition], + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees + ) + model_responses[condition] = candidate.look_at(stimulus_set, number_of_trials=self._number_of_trials, + require_variance=True) + + raw_score = self._metric(model_responses, self._assemblies) + + # Adjust score to ceiling + ceiling = self.ceiling + score = raw_score / ceiling + + score.attrs['raw'] = raw_score + score.attrs['ceiling'] = ceiling + return score + + +class _Malania2007VernierAcuity(BenchmarkBase): + def __init__(self): + self.baseline_condition = BASELINE_CONDITION + self.conditions = DATASETS.copy() + self.conditions.remove('vernieracuity-threshold') + + self._assemblies = {condition: {'baseline_assembly': self.get_assemblies(condition)['baseline_assembly'], + 'condition_assembly': self.get_assemblies(condition)['condition_assembly']} + for condition in self.conditions} + self._stimulus_set = brainscore_vision.load_stimulus_set(f'Malania2007.{self.baseline_condition}') + self._fitting_stimuli = {condition: brainscore_vision.load_stimulus_set(f'Malania2007.{condition}'.rstrip('-threshold_elevation') + '_fit') + for condition in self.conditions} 
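+        # unlike the threshold-elevation benchmarks, this benchmark measures the vernier-only (baseline)
+        # threshold directly: for each flanker condition the model is re-fit on that condition's fitting
+        # stimuli, its vernier threshold is read out at 75% accuracy, and the per-condition scores are averaged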
+ + self._metric = load_metric('threshold', + independent_variable='image_label', + threshold_accuracy=0.75) + + self._visual_degrees = 2.986667 + self._number_of_trials = 10 # arbitrary choice for microsaccades to improve precision of estimates + + super(_Malania2007VernierAcuity, self).__init__( + identifier=f'Malania2007.vernieracuity-threshold', version=1, + ceiling_func=lambda: self.mean_ceiling(), + parent='Malania2007', + bibtex=BIBTEX) + + def __call__(self, candidate: BrainModel): + scores = [] + for condition in self.conditions: + candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli=self._fitting_stimuli[condition], + number_of_trials=2, require_variance=True) + stimulus_set = place_on_screen( + self._stimulus_set, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees + ) + model_response = candidate.look_at(stimulus_set, number_of_trials=self._number_of_trials, + require_variance=True) + + raw_score = self._metric(model_response, self._assemblies[condition]) + # Adjust score to ceiling + ceiling = self.ceiling + score = raw_score / ceiling + score.attrs['error'] = raw_score.error + + score.attrs['raw'] = raw_score + score.attrs['ceiling'] = ceiling + scores.append(score) + # average all scores to get 1 average score + mean_score = Score(np.mean(scores)) + mean_score.attrs['error'] = np.mean([score.error for score in scores]) + return mean_score + + def get_assemblies(self, condition: str): + condition = condition.rstrip('-threshold_elevation') + baseline_assembly = LazyLoad(lambda: load_assembly(self.baseline_condition)) + condition_assembly = LazyLoad(lambda: load_assembly(condition)) + assembly, baseline_assembly = filter_baseline_subjects(condition_assembly, + baseline_assembly) + return {'condition_assembly': assembly, + 'baseline_assembly': baseline_assembly} + + def mean_ceiling(self): + ceilings = [] + errors = [] + for assembly_name in self._assemblies.keys(): + this_ceiling = self._metric.ceiling(self._assemblies[assembly_name]['baseline_assembly']) + ceilings.append(this_ceiling.values) + errors.append(this_ceiling.error) + mean_ceiling = Score(np.mean(ceilings)) + mean_ceiling.attrs['error'] = np.mean(errors) + return mean_ceiling + + +def load_assembly(dataset: str) -> PropertyAssembly: + assembly = brainscore_vision.load_dataset(f'Malania2007.{dataset}') + return assembly + + +def filter_baseline_subjects(condition_assembly: PropertyAssembly, + baseline_assembly: PropertyAssembly + ) -> Tuple[PropertyAssembly, PropertyAssembly]: + """A function to select only the unique subjects that exist in the condition_assembly.""" + non_nan_mask = ~np.isnan(condition_assembly.values) + unique_ids = condition_assembly.coords['subject'][non_nan_mask].values.tolist() + + mask = baseline_assembly.coords['subject'].isin(unique_ids) + filtered_baseline_assembly = baseline_assembly.where(mask, drop=True) + filtered_condition_assembly = condition_assembly.where(mask, drop=True) + return filtered_condition_assembly, filtered_baseline_assembly diff --git a/brainscore_vision/benchmarks/malania2007/test.py b/brainscore_vision/benchmarks/malania2007/test.py new file mode 100644 index 000000000..8a8ca75cd --- /dev/null +++ b/brainscore_vision/benchmarks/malania2007/test.py @@ -0,0 +1,63 @@ +import numpy as np +import pytest +from pytest import approx + +from brainscore_vision import benchmark_registry, load_benchmark, load_model +from brainscore_vision.benchmarks.malania2007.benchmark import DATASETS + + +class TestBehavioral: + def 
test_count(self): + assert len(DATASETS) == 5 + 2 + 2 + 1 + + @pytest.mark.parametrize('dataset', DATASETS) + def test_in_pool(self, dataset): + identifier = f"Malania2007.{dataset}" + assert identifier in benchmark_registry + + @pytest.mark.private_access + def test_mean_ceiling(self): + benchmarks = [f"Malania2007.{dataset}" for dataset in DATASETS] + benchmarks = [load_benchmark(benchmark) for benchmark in benchmarks] + ceilings = [benchmark.ceiling for benchmark in benchmarks] + mean_ceiling = np.mean(ceilings) + assert mean_ceiling == approx(0.5757928329186803, abs=0.001) + + # these test values are for the pooled score ceiling + @pytest.mark.private_access + @pytest.mark.parametrize('dataset, expected_ceiling', [ + ('short2-threshold_elevation', approx(0.78719345, abs=0.001)), + ('short4-threshold_elevation', approx(0.49998989, abs=0.001)), + ('short6-threshold_elevation', approx(0.50590051, abs=0.001)), + ('short8-threshold_elevation', approx(0.4426336, abs=0.001)), + ('short16-threshold_elevation', approx(0.8383443, abs=0.001)), + ('equal2-threshold_elevation', approx(0.56664015, abs=0.001)), + ('long2-threshold_elevation', approx(0.46470421, abs=0.001)), + ('equal16-threshold_elevation', approx(0.44087153, abs=0.001)), + ('long16-threshold_elevation', approx(0.50996587, abs=0.001)), + ('vernieracuity-threshold', approx(0.70168481, abs=0.001)) + ]) + def test_dataset_ceiling(self, dataset, expected_ceiling): + benchmark = f"Malania2007.{dataset}" + benchmark = load_benchmark(benchmark) + ceiling = benchmark.ceiling + assert ceiling == expected_ceiling + + @pytest.mark.parametrize('dataset, expected_score', [ + ('short2-threshold_elevation', approx(0.0, abs=0.001)), + ('short4-threshold_elevation', approx(0.0, abs=0.001)), + ('short6-threshold_elevation', approx(0.0, abs=0.001)), + ('short8-threshold_elevation', approx(0.0, abs=0.001)), + ('short16-threshold_elevation', approx(0.0, abs=0.001)), + ('equal2-threshold_elevation', approx(0.0, abs=0.001)), + ('long2-threshold_elevation', approx(0.0, abs=0.001)), + ('equal16-threshold_elevation', approx(0.0, abs=0.001)), + ('long16-threshold_elevation', approx(0.0, abs=0.001)), + ('vernieracuity-threshold', approx(0.0, abs=0.001)) + ]) + def test_model_score(self, dataset, expected_score): + benchmark = f"Malania2007.{dataset}" + benchmark = load_benchmark(benchmark) + model = load_model('alexnet') + model_score = benchmark(model) + assert model_score.values == expected_score diff --git a/brainscore_vision/data/malania2007/__init__.py b/brainscore_vision/data/malania2007/__init__.py new file mode 100644 index 000000000..e6ecbb5cd --- /dev/null +++ b/brainscore_vision/data/malania2007/__init__.py @@ -0,0 +1,254 @@ +from brainio.assemblies import PropertyAssembly + +from brainscore_vision import data_registry, stimulus_set_registry, load_stimulus_set +from brainscore_vision.data_helpers.s3 import load_assembly_from_s3, load_stimulus_set_from_s3 + + +BIBTEX = """@article{malania2007, + author = {Malania, Maka and Herzog, Michael H. 
and Westheimer, Gerald}, + title = "{Grouping of contextual elements that affect vernier thresholds}", + journal = {Journal of Vision}, + volume = {7}, + number = {2}, + pages = {1-1}, + year = {2007}, + issn = {1534-7362}, + doi = {10.1167/7.2.1}, + url = {https://doi.org/10.1167/7.2.1} + }""" + + +data_registry['Malania2007.equal2'] = lambda: load_assembly_from_s3( + identifier='Malania2007_equal-2', + version_id="yFXK8xjGjEmuYTSfS58rGS_ah3.NGg0X", + sha1="277b2fbffed00e16b6a69b488f73eeda5abaaf10", + bucket="brainio-brainscore", + cls=PropertyAssembly, + stimulus_set_loader=None, +) +data_registry['Malania2007.equal16'] = lambda: load_assembly_from_s3( + identifier='Malania2007_equal-16', + version_id="SRZ7bs.Ek59GkeS084Pvdy38uTzFs4yw", + sha1="ef49506238e8d2554918b113fbc60c133077186e", + bucket="brainio-brainscore", + cls=PropertyAssembly, + stimulus_set_loader=None, +) +data_registry['Malania2007.long2'] = lambda: load_assembly_from_s3( + identifier='Malania2007_long-2', + version_id="2c1lWuXthb3rymB3seTQX1jVqiKUTn1f", + sha1="9076a5b693948c4992b6c8e753f04a7acd2014a1", + bucket="brainio-brainscore", + cls=PropertyAssembly, + stimulus_set_loader=None, +) +data_registry['Malania2007.long16'] = lambda: load_assembly_from_s3( + identifier='Malania2007_long-16', + version_id="qshNxhxjgusWyWiXnbfFN6gqjLgRh8fO", + sha1="3106cf1f2fa9e66617ebf231df05d29077fc478f", + bucket="brainio-brainscore", + cls=PropertyAssembly, + stimulus_set_loader=None, +) +data_registry['Malania2007.short2'] = lambda: load_assembly_from_s3( + identifier='Malania2007_short-2', + version_id="8CQ9MupuljAgkkKUXs3hiOliHg8xoDxb", + sha1="85fb65ad76de48033c704b9c5689771e1ea0457d", + bucket="brainio-brainscore", + cls=PropertyAssembly, + stimulus_set_loader=None, +) +data_registry['Malania2007.short4'] = lambda: load_assembly_from_s3( + identifier='Malania2007_short-4', + version_id=".ZUO0upSfQrWLPgd4oGwAaCbN4bz6S6H", + sha1="75506be9a26ec38a223e41510f1a8cb32d5b0bc9", + bucket="brainio-brainscore", + cls=PropertyAssembly, + stimulus_set_loader=None, +) +data_registry['Malania2007.short6'] = lambda: load_assembly_from_s3( + identifier='Malania2007_short-6', + version_id="q4FugpNGkT_FQP..qIVzye83hAQR2xfS", + sha1="2901be6b352e67550da040d79d744819365b8626", + bucket="brainio-brainscore", + cls=PropertyAssembly, + stimulus_set_loader=None, +) +data_registry['Malania2007.short8'] = lambda: load_assembly_from_s3( + identifier='Malania2007_short-8', + version_id="4_lcRl_I7Mp0RHxcfqZ9tkAZjVh.5oMU", + sha1="6daf47b086cb969d75222e320f49453ed8437885", + bucket="brainio-brainscore", + cls=PropertyAssembly, + stimulus_set_loader=None, +) +data_registry['Malania2007.short16'] = lambda: load_assembly_from_s3( + identifier='Malania2007_short-16', + version_id="fFqEIyIC9CHzqTEmv0MitjCgpeMX5pxJ", + sha1="8ae0898caad718b747f85fce5888416affc3a569", + bucket="brainio-brainscore", + cls=PropertyAssembly, + stimulus_set_loader=None, +) +data_registry['Malania2007.vernier_only'] = lambda: load_assembly_from_s3( + identifier='Malania2007_vernier-only', + version_id="JLWf2pIR_UadQHqwtegJkC6XzWdbSNGi", + sha1="1cf83e8b6141f8b0d67ea46994f342325f62001f", + bucket="brainio-brainscore", + cls=PropertyAssembly, + stimulus_set_loader=None, +) + + +stimulus_set_registry['Malania2007.equal2'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_equal-2', + bucket="brainio-brainscore", + csv_sha1="77e94b9b5122a83ebbaffb4a06fcab68ef652751", + zip_sha1="99826d459f6920dafab72eed69eb2a90492ce796", + csv_version_id="MlRpSz.4.jvVRFAZl8tGEum1P0Q0GtyS", + 
zip_version_id="vHbAM_FjTbjp5U12BkAelJu4KW6PLYFn" +) +stimulus_set_registry['Malania2007.equal2_fit'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_equal-2_fit', + bucket="brainio-brainscore", + csv_sha1="bafdfc855c164d3e5443d67dcf9eb7762443f964", + zip_sha1="e52fec1a79ac8837e331b180c2a8a140840d6666", + csv_version_id="PIXEW.2vHvjIBP0Q2KHIpnxns7t9o8Cf", + zip_version_id="h7pp84CYFGLKlPhveD0L5ogePqisk_I7" +) +stimulus_set_registry['Malania2007.equal16'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_equal-16', + bucket="brainio-brainscore", + csv_sha1="5fedcff56c302339c3451ae2edbcb846c39c3189", + zip_sha1="b30dc2dc90e4f3d88775622e558db963765f38e0", + csv_version_id="VmRGiQkhPALDwq74NpE2VpTiKTGn.30T", + zip_version_id="c.DOlVULXZingRJ9gVY_NbZwRrj_xs_i" +) +stimulus_set_registry['Malania2007.equal16_fit'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_equal-16_fit', + bucket="brainio-brainscore", + csv_sha1="3de3e5de19a638767a01ba68cb690dc746c29a77", + zip_sha1="1728920c5ea4fb7b3a3cf3c076165aca65c8b751", + csv_version_id="joAq8JBC_7axZDfLNFgoXFhTCLU_KKr_", + zip_version_id="77JRwdldaHDr6TLW1NnB5HucIrkUCVg." +) +stimulus_set_registry['Malania2007.long2'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_long-2', + bucket="brainio-brainscore", + csv_sha1="ba65316a63dc688d8dfb410219a28fd02850b991", + zip_sha1="7fd431fbbd4a4dc0cd271624d3297c19a28a70b5", + csv_version_id="_0fqObn6k5KvXurHMsuD4IqtrqbNskyo", + zip_version_id="foL92ndVAAAETzMYHdmMtwIwKxXYhAB." +) +stimulus_set_registry['Malania2007.long2_fit'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_long-2_fit', + bucket="brainio-brainscore", + csv_sha1="b91dd9261c1d47bdd37f9b60eb8066b7b719709f", + zip_sha1="5be3e1cd57b59081103715b5d318505166e0045e", + csv_version_id="mATh8lcVisdsDnPnU6ACE23iBPfpkLZA", + zip_version_id="6nEviShTyCYQKrmxyjDyNov9Skc77eXT" +) +stimulus_set_registry['Malania2007.long16'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_long-16', + bucket="brainio-brainscore", + csv_sha1="1f1b03319b81698ba5e7db389dcd4248f94e45ca", + zip_sha1="97c70462a28905b58058c687880188d634d357f0", + csv_version_id="4RtywQ40hfQA4N80g8lxEScAmMXFRg7E", + zip_version_id="lJy2QosABzHtiA6BJaE4OqCn1w1Jhz2k" +) +stimulus_set_registry['Malania2007.long16_fit'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_long-16_fit', + bucket="brainio-brainscore", + csv_sha1="d80a02c75b9908301c3c8dc9f7116fecf8e060ec", + zip_sha1="d8819b94d3f502d7a382c8a0db0a34627132e5e2", + csv_version_id="gOxY6tjnT7LO.FDeL1xkRmowl5wYeAia", + zip_version_id="71UAPTnZscIuqdx2dhuW9V0O0DO_TgTM" +) +stimulus_set_registry['Malania2007.short2'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_short-2', + bucket="brainio-brainscore", + csv_sha1="bf0252056d2084e855646f624700ab03c19cfc3d", + zip_sha1="eee1270feb7443e7e315d8feb7fb0a6b6908f554", + csv_version_id="zcJqM.ZPwJyiMRWa3RBdvv401yPnLQAp", + zip_version_id="C8WZzAAQ0JGHAAKii4JpvlRhcUOhgSj." 
+) +stimulus_set_registry['Malania2007.short2_fit'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_short-2_fit', + bucket="brainio-brainscore", + csv_sha1="73127d279a2cd254ae4f07b0053580851e84b00c", + zip_sha1="918736349d714a4f784c29bf7e7d218b103e128d", + csv_version_id="iwGRp3_ktAHfJ6r7ktSK9gsthDjKek70", + zip_version_id="6RpplJ9UVXTlvhmFSXla0Qa20b44m8Ds" +) +stimulus_set_registry['Malania2007.short4'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_short-4', + bucket="brainio-brainscore", + csv_sha1="816326d89d358f6592bd1f789e5c8d429fbca1cd", + zip_sha1="ff57d976ef75ede9148a4097e90d6cf6c8054d34", + csv_version_id="Waikk.bktXIncCUtCIAyB2EqynGk.H.F", + zip_version_id="rl_muxI4UEpwXVaXuhsqroG..COGILvR" +) +stimulus_set_registry['Malania2007.short4_fit'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_short-4_fit', + bucket="brainio-brainscore", + csv_sha1="3512cfd029f4e4299bc41ede519e691d80cfc3d5", + zip_sha1="301386408dd1fb8556881f9a171be2d43dbfec6e", + csv_version_id="UhisdJqiEmkQ_4zsUtAmaxtle2kMZdcD", + zip_version_id="xt_v0xgCB8YUptyPB0yZFHIUcel5MF_x" +) +stimulus_set_registry['Malania2007.short6'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_short-6', + bucket="brainio-brainscore", + csv_sha1="3d5dd9b48a56ba0c31de94b6221b97df962b6f8a", + zip_sha1="120d90a143d1577d4745c3f69291d0db6c7e512e", + csv_version_id="GwGHPJkMDdg8N_.boyj8qJ3ChsEx4w._", + zip_version_id="gIN1O4yz.THvK0Ifm5M3AI58ZACE1QFh" +) +stimulus_set_registry['Malania2007.short6_fit'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_short-6_fit', + bucket="brainio-brainscore", + csv_sha1="27a5be4fca190837fc5b75ed2cdbbffbf6b41338", + zip_sha1="c88e05c6cadec88a2c9475b0735323a2b049bd75", + csv_version_id="oMlj7wV85s00hJFE84ym0AJHLCfYHVA6", + zip_version_id="oS.KrBTlcYAgr_lWyA_bIjVc2js_VeUe" +) +stimulus_set_registry['Malania2007.short8'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_short-8', + bucket="brainio-brainscore", + csv_sha1="8fc35f607196b4c0cdcebd8102d17e3a637e5988", + zip_sha1="a9215ed0cb0f0333582dda65f6afd7015c506ba5", + csv_version_id="gzys8s7j7euMEl7JJpqBFLFHMpFjwbA7", + zip_version_id="3fYb4Iruh3lRKUwC1APqFH4CNbE5DEuk" +) +stimulus_set_registry['Malania2007.short8_fit'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_short-8_fit', + bucket="brainio-brainscore", + csv_sha1="aa4133a9fe19a3c9004a9cb5e6eb5a72564e4883", + zip_sha1="beb9f068794708e41750202b78c438538a40a8fb", + csv_version_id="7N1Z.uiagqBknJUSBQ4mVfHKWgocM5aA", + zip_version_id="kcEOPOkvWymO0wX5j_QKxcNPl9sZsjFd" +) +stimulus_set_registry['Malania2007.short16'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_short-16', + bucket="brainio-brainscore", + csv_sha1="addd260c9959f2f315db03c0a39c6c1b01fef685", + zip_sha1="cba4c2866ec692fb808471df7c2fed446d9fb3fe", + csv_version_id="Peu7WU5vanLoZNOFIAbuPzZNPDRgbCSX", + zip_version_id="wFkJkZMC8Fs_HfPJy32CMKcHJWeQIUDB" +) +stimulus_set_registry['Malania2007.short16_fit'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_short-16_fit', + bucket="brainio-brainscore", + csv_sha1="9b340fe242117482f6992f48a805297215ba9924", + zip_sha1="4a90d511a3ceb3307a672177a3ad6b76521e65e5", + csv_version_id="sYBPEmXDgbWipuepciLirlorQE3L8BLc", + zip_version_id="pYvOkrLxadkQ67K3__wmciNwaCW.hyyN" +) +stimulus_set_registry['Malania2007.vernier_only'] = lambda: load_stimulus_set_from_s3( + identifier='Malania2007_vernier-only', + bucket="brainio-brainscore", + 
csv_sha1="b2cb0f2ed32426b739f90187ae24ad4adf84110d", + zip_sha1="0e177aea523adc320070196fbb777af4cdba2144", + csv_version_id="c8wpZpqoMqdATlqdoq3srPUi_8fYg6a.", + zip_version_id="28lHgxERhw32Ux6IBCxWWTtRwIaRrwo6" +) diff --git a/brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py b/brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py new file mode 100644 index 000000000..091ac3fa6 --- /dev/null +++ b/brainscore_vision/data/malania2007/data_packaging/malania_data_assembly.py @@ -0,0 +1,79 @@ +from pathlib import Path +import numpy as np +import xarray as xr + +from brainio.assemblies import PropertyAssembly +from brainio.packaging import package_data_assembly +import pandas as pd + + +DATASETS = ['short-2', 'short-4', 'short-6', 'short-8', 'short-16', 'equal-2', + 'long-2', 'equal-16', 'long-16', 'vernier-only'] +NUM_SUBJECTS = {'short-2': 6, + 'short-4': 5, + 'short-6': 5, + 'short-8': 5, + 'short-16': 6, + 'equal-2': 5, + 'long-2': 5, + 'equal-16': 5, + 'long-16': 5, + 'vernier-only': 6} + + +def collect_malania_data_assembly(root_directory, dataset): + """ + Experiment Information: + - 5-6 observers per condition (for exact value, see NUM_SUBJECTS) + - 2AFC left/right offset discrimination task + - PEST staircase to 75% correct responses + - thresholds measured with a cumulative gaussian psychometric function with a likelihood fit + """ + # construct the assembly + metadata_directory = Path(f'{root_directory}/{dataset}/metadata_human.xlsx') + metadata = pd.read_excel(metadata_directory) + # Since subjects are uniquely held using 'unique_subject_id', drop the rows with a subject + # without measurement + assembly = PropertyAssembly(metadata['threshold'], + coords={ + 'subject': ('subject', metadata['subject_unique_id']), + }, + dims=['subject'] + ) + + # give the assembly an identifier name + assembly.name = f'Malania2007_{dataset}' + + # test subject numbers after removing the NaN subject + metadata = metadata.dropna(subset=['threshold'], axis=0) + assert len(metadata) == NUM_SUBJECTS[dataset] + + return assembly + + +def remove_subjects_with_nans(assembly1, assembly2): + # Find the indices of the subjects with NaN values in the first PropertyAssembly + nan_subjects = np.isnan(assembly1.values) + + # Convert the boolean array to a DataArray with the same coordinates as the input assemblies + nan_subjects_da = xr.DataArray(nan_subjects, coords=assembly1.coords, dims=assembly1.dims) + + # Filter out the subjects with NaN values from both PropertyAssemblies + filtered_assembly1 = assembly1.where(~nan_subjects_da, drop=True) + filtered_assembly2 = assembly2.where(~nan_subjects_da, drop=True) + + return filtered_assembly1, filtered_assembly2 + + +if __name__ == '__main__': + root_directory = Path(r'.') + for dataset in DATASETS: + assembly = collect_malania_data_assembly(root_directory, dataset) + # upload to S3 + prints = package_data_assembly(catalog_identifier=None, + proto_data_assembly=assembly, + assembly_identifier=assembly.name, + stimulus_set_identifier=assembly.name, + assembly_class_name="PropertyAssembly", + bucket_name="brainio-brainscore") + print(prints) diff --git a/brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py b/brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py new file mode 100644 index 000000000..8a9f63fde --- /dev/null +++ b/brainscore_vision/data/malania2007/data_packaging/malania_stimulus_set.py @@ -0,0 +1,79 @@ +import csv +from pathlib import Path +from brainio.stimuli 
import StimulusSet +from brainio.packaging import package_stimulus_set + + +# every stimulus set is separate, incl. baseline condition +STIMULUS_SETS = ['short-2', 'short-4', 'short-6', 'short-8', 'short-16', 'equal-2', + 'long-2', 'equal-16', 'long-16', 'vernier-only', 'short-2_fit', + 'short-4_fit', 'short-6_fit', 'short-8_fit', 'short-16_fit', + 'equal-2_fit', 'long-2_fit', 'equal-16_fit', 'long-16_fit'] +DATASET_LENGTHS = {'test': 50, 'fit': 500} + + +def collect_malania_stimulus_set(root_directory, dataset): + """ + Dataset Meta Info + + Reported in pixels: + - image_size_x; image_size_y + - vernier_position_x; vernier_position_y + + Reported in arcsec: + - vernier_height (height of the vernier elements combined, - middle gap) + - vernier_offset (horizontal offset between flankers) + - flanker_height (height of the flanker elements) + - flanker_spacing (distance between a flanker element and another flanker element) + - line_width (width of all the lines in all elements) + - flanker_distance (distance between a flanker and a vernier) + """ + stimuli = [] + stimulus_paths = {} + + dataset_type = 'fit' if dataset[-3:] == 'fit' else 'test' + metadata_directory = Path(f'{root_directory}/{dataset}/metadata.csv') + image_directory = Path(f'{root_directory}/{dataset}/images') + with open(metadata_directory, 'r') as metadata: + reader = csv.DictReader(metadata) + for row in reader: + stimuli.append({ + 'image_size_x_pix': int(row['image_size_x_pix']), + 'image_size_y_pix': int(row['image_size_y_pix']), + 'image_size_c': int(row['image_size_c']), + 'image_size_degrees': float(row['image_size_degrees']), + 'vernier_height_arcsec': float(row['vernier_height_arcsec']), + 'vernier_offset_arcsec': float(row['vernier_offset_arcsec']), + 'image_label': row['image_label'], + 'flanker_height_arcsec': float(row['flanker_height_arcsec']), + 'flanker_spacing_arcsec': float(row['flanker_spacing_arcsec']), + 'line_width_arcsec': float(row['line_width_arcsec']), + 'flanker_distance_arcsec': float(row['flanker_distance_arcsec']), + 'num_flankers': int(row['num_flankers']), + 'vernier_position_x_pix': int(row['vernier_position_x_pix']), + 'vernier_position_y_pix': int(row['vernier_position_y_pix']), + 'stimulus_id': str(row['stimulus_id']), + }) + stimulus_paths[row['stimulus_id']] = Path(f'{image_directory}/{row["filename"]}') + + stimuli = StimulusSet(stimuli) + stimuli.stimulus_paths = stimulus_paths + stimuli.name = f'Malania2007_{dataset}' # give the StimulusSet an identifier name + stimuli.identifier = f'Malania2007_{dataset}' + + # Ensure expected number of stimuli in datasets + assert len(stimuli) == DATASET_LENGTHS[dataset_type] + return stimuli + + +if __name__ == '__main__': + root_directory = Path(r'../data/malania2007/data_packaging/') + for stimulus_set in STIMULUS_SETS: + stimuli = collect_malania_stimulus_set(root_directory, stimulus_set) + + # upload to S3 + prints = package_stimulus_set(catalog_name=None, + proto_stimulus_set=stimuli, + stimulus_set_identifier=stimuli.name, + bucket_name="brainio-brainscore") + print(prints) diff --git a/brainscore_vision/data/malania2007/test.py b/brainscore_vision/data/malania2007/test.py new file mode 100644 index 000000000..faf216749 --- /dev/null +++ b/brainscore_vision/data/malania2007/test.py @@ -0,0 +1,147 @@ +import numpy as np +import pytest + +from brainscore_vision import load_stimulus_set, load_dataset +from brainscore_vision.benchmarks.malania2007.benchmark import DATASETS + + +@pytest.mark.private_access +class TestAssemblies: + # test the 
number of subjects: + @pytest.mark.parametrize('identifier, num_subjects', [ + ('short2', 6), + ('short4', 5), + ('short6', 5), + ('short8', 5), + ('short16', 6), + ('equal2', 5), + ('long2', 5), + ('equal16', 5), + ('long16', 5), + ('vernier_only', 6) + ]) + def test_num_subjects(self, identifier, num_subjects): + assembly = load_dataset(f"Malania2007.{identifier}") + assembly = assembly.dropna(dim='subject') + assert len(np.unique(assembly['subject'].values)) == num_subjects + + # test assembly coords present in ALL 17 sets: + @pytest.mark.parametrize('identifier', [ + 'short2', + 'short4', + 'short6', + 'short8', + 'short16', + 'equal2', + 'long2', + 'equal16', + 'long16', + 'vernier_only', + ]) + @pytest.mark.parametrize('field', [ + 'subject' + ]) + def test_fields_present(self, identifier, field): + assembly = load_dataset(f"Malania2007.{identifier}") + assert hasattr(assembly, field) + + +@pytest.mark.private_access +class TestStimulusSets: + # test stimulus_set data: + @pytest.mark.parametrize('identifier', [ + 'short2', + 'short4', + 'short6', + 'short8', + 'short16', + 'equal2', + 'long2', + 'equal16', + 'long16', + 'short2_fit', + 'short4_fit', + 'short6_fit', + 'short8_fit', + 'short16_fit', + 'equal2_fit', + 'long2_fit', + 'equal16_fit', + 'long16_fit', + 'vernier_only' + ]) + def test_stimulus_set_exist(self, identifier): + full_name = f"Malania2007.{identifier}" + stimulus_set = load_stimulus_set(full_name) + assert stimulus_set is not None + stripped_actual_identifier = stimulus_set.identifier.replace('.', '').replace('_', '').replace('-', '') + stripped_expected_identifier = full_name.replace('.', '').replace('_', '').replace('-', '') + assert stripped_actual_identifier == stripped_expected_identifier + + @pytest.mark.parametrize('identifier, num_images', [ + ('short2', 50), + ('short4', 50), + ('short6', 50), + ('short8', 50), + ('short16', 50), + ('equal2', 50), + ('long2', 50), + ('equal16', 50), + ('long16', 50), + ('short2_fit', 500), + ('short4_fit', 500), + ('short6_fit', 500), + ('short8_fit', 500), + ('short16_fit', 500), + ('equal2_fit', 500), + ('long2_fit', 500), + ('equal16_fit', 500), + ('long16_fit', 500), + ('vernier_only', 50) + ]) + def test_number_of_images(self, identifier, num_images): + stimulus_set = load_stimulus_set(f"Malania2007.{identifier}") + assert len(np.unique(stimulus_set['stimulus_id'].values)) == num_images + + # tests stimulus_set coords for the 14 "normal" sets: + @pytest.mark.parametrize('identifier', [ + 'short2', + 'short4', + 'short6', + 'short8', + 'short16', + 'equal2', + 'long2', + 'equal16', + 'long16', + 'short2_fit', + 'short4_fit', + 'short6_fit', + 'short8_fit', + 'short16_fit', + 'equal2_fit', + 'long2_fit', + 'equal16_fit', + 'long16_fit', + 'vernier_only' + ]) + @pytest.mark.parametrize('field', [ + 'image_size_x_pix', + 'image_size_y_pix', + 'image_size_c', + 'image_size_degrees', + 'vernier_height_arcsec', + 'vernier_offset_arcsec', + 'image_label', + 'flanker_height_arcsec', + 'flanker_spacing_arcsec', + 'line_width_arcsec', + 'flanker_distance_arcsec', + 'num_flankers', + 'vernier_position_x_pix', + 'vernier_position_y_pix', + 'stimulus_id', + ]) + def test_fields_present(self, identifier, field): + stimulus_set = load_stimulus_set(f"Malania2007.{identifier}") + assert hasattr(stimulus_set, field) diff --git a/brainscore_vision/metrics/threshold/__init__.py b/brainscore_vision/metrics/threshold/__init__.py new file mode 100644 index 000000000..69f6102e8 --- /dev/null +++ 
b/brainscore_vision/metrics/threshold/__init__.py @@ -0,0 +1,5 @@ +from brainscore_vision import metric_registry +from .metric import Threshold, ThresholdElevation + +metric_registry['threshold'] = Threshold +metric_registry['threshold_elevation'] = ThresholdElevation diff --git a/brainscore_vision/metrics/threshold/metric.py b/brainscore_vision/metrics/threshold/metric.py new file mode 100644 index 000000000..2b04270e3 --- /dev/null +++ b/brainscore_vision/metrics/threshold/metric.py @@ -0,0 +1,482 @@ +from typing import Dict, Union, Tuple, List, Optional, Callable + +import numpy as np +from scipy.optimize import minimize +from scipy.stats import norm +from sklearn.metrics import r2_score +import matplotlib.pyplot as plt + +from brainscore_core.metrics import Metric, Score +from brainio.assemblies import PropertyAssembly, BehavioralAssembly + + +def psychometric_cum_gauss(x: np.array, alpha: float, beta: float, lambda_: float, gamma: float = 0.5) -> float: + """ + The classic psychometric function as implemented in Wichmann & Hill (2001). The psychometric function: I. + Fitting, sampling, and goodness of fit, eq. 1. + + :param x: the independent variables of the data + :param alpha: the slope parameter + :param beta: the mean of the cdf parameter + :param lambda_: the lapse rate + :param gamma: the lower bound of the fit + + :return: the psychometric function values for the given parameters evaluated at `x`. + """ + return gamma + (1 - gamma - lambda_) * norm.cdf(alpha * (x - beta)) + + +def inverse_psychometric_cum_gauss(y: np.array, alpha: float, beta: float, lambda_: float, gamma: float = 0.5) -> float: + """The inverse of psychometric_cum_gauss.""" + return beta + (norm.ppf((y - gamma) / (1 - gamma - lambda_)) / alpha) + + +def cum_gauss_neg_log_likelihood(params: Tuple[float, ...], x: np.array, y: np.array) -> float: + """The negative log likelihood function for psychometric_cum_gauss.""" + alpha, beta, lambda_ = params + p = psychometric_cum_gauss(x, alpha, beta, lambda_) + log_likelihood = y * np.log(p) + (1 - y) * np.log(1 - p) + return -np.sum(log_likelihood) + + +def get_predicted(params: Tuple[float, ...], x: np.array, fit_fn: Callable) -> np.array: + """Returns the predicted values based on the model parameters.""" + return fit_fn(x, *params) + + +def grid_search(x: np.array, + y: np.array, + alpha_values: np.array = np.logspace(-3, 1, 50), + beta_values: np.array = None, + fit_fn: Callable = psychometric_cum_gauss, + fit_log_likelihood_fn: Callable = cum_gauss_neg_log_likelihood, + fit_bounds: Tuple = ((None, None), (None, None), (0.03, 0.5)) + ) -> Tuple[Tuple[float, ...], float]: + """ + A classic simplified procedure for running sparse grid search over the slope and mean parameters of the + psychometric function. + This function is implemented here instead of using sklearn.GridSearchCV since we would have to make a custom + sklearn estimator class to use GridSearchCV with psychometric functions, likely increasing code bloat + substantially. 
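+
+    For example, with the default `alpha_values` (50 log-spaced slope guesses) and `beta_values`
+    defaulting to the measured x-points, accuracies measured at 8 x-values trigger 50 * 8 = 400
+    candidate L-BFGS-B fits, of which the parameters with the lowest negative log likelihood are kept.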
+ + :param x: the independent variables of the data + :param y: the measured accuracy rates for the given x-values + :param alpha_values: the alpha values for the chosen fit function to grid search over + :param beta_values: the beta values for the chosen fit function to grid search over + :param fit_fn: the psychometric function that is fit + :param fit_log_likelihood_fn: the log likelihood function that computes the log likelihood of its corresponding + fit function + :param fit_bounds: the bounds assigned to the fit function called by fit_log_likelihood_fn. + The default fit_bounds are assigned as: + alpha: (None, None), to allow any slope + beta: (None, None), any inflection point is allowed, as that is controlled for in the + Threshold class + lambda_: (0.03, 0.5)), to require at least a small lapse rate, as is regularly done in + human fitting + + :return: the parameters of the best fit in the grid search + """ + assert len(x) == len(y) + # Default the beta_values grid search to the measured x-points. + if beta_values is None: + beta_values = x + + # initialize best values for a fit + best_alpha, best_beta, best_lambda = None, None, None + min_neg_log_likelihood = np.inf + + for alpha_guess in alpha_values: + for beta_guess in beta_values: + initial_guess = np.array([alpha_guess, beta_guess, 1 - np.max(y)]) # lapse rate guess set to the maximum y + + # wrap inside a RuntimeError block to catch the RuntimeError thrown by scipy.minimize if a fit + # entirely fails. The case where all fits fail here is handled by the Threshold metric. + try: + result = minimize(fit_log_likelihood_fn, initial_guess, args=(x, y), + method='L-BFGS-B', bounds=fit_bounds) + alpha_hat, beta_hat, lambda_hat = result.x + neg_log_likelihood_hat = fit_log_likelihood_fn([alpha_hat, beta_hat, lambda_hat], x, y) + + if neg_log_likelihood_hat < min_neg_log_likelihood: + min_neg_log_likelihood = neg_log_likelihood_hat + best_alpha, best_beta, best_lambda = alpha_hat, beta_hat, lambda_hat + except RuntimeError: + pass + + y_pred = fit_fn(x, best_alpha, best_beta, best_lambda) + r2 = r2_score(y, y_pred) # R^2 of the fit + return (best_alpha, best_beta, best_lambda), r2 + + +class Threshold(Metric): + """ + Computes a psychometric threshold function from model responses and compares against human-computed psychometric + thresholds. + + The model comparison to human data is currently individual-subject based, i.e., models and ceilings are compared + against the mean of the distance of the model threshold to human thresholds. + """ + def __init__(self, + independent_variable: str, + fit_function=psychometric_cum_gauss, + fit_inverse_function=inverse_psychometric_cum_gauss, + threshold_accuracy: Union[str, float] = 'inflection', + required_accuracy: Optional[float] = 0.6 + ): + """ + :param independent_variable: The independent variable in the benchmark that the threshold is computed + over. + :param fit_function: The function used to fit the threshold. + :param fit_inverse_function: The inverse of fit_function used to find the threshold from the fit. + :param threshold_accuracy: The accuracy at which the threshold should be evaluated at. This can be + either a string Literal['inflection'] or a float. When Literal['inflection'] + is used, the function finds the inflection point of the curve and evaluates + the threshold at that level. When a float is used, the function evaluates + the threshold at that level. + :param required_accuracy: The minimum accuracy required for the psychometric function fit to be considered. 
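+
+        Illustrative usage (a sketch only; the coordinate name and the `model_assembly` /
+        `human_thresholds` variables are placeholders for a BehavioralAssembly of model responses
+        and a list of human thresholds):
+
+            metric = Threshold(independent_variable='vernier_offset_arcsec',
+                               threshold_accuracy='inflection')
+            score = metric(model_assembly, human_thresholds)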
+ """ + self.fit_function = fit_function + self.fit_inverse_function = fit_inverse_function + self._independent_variable = independent_variable + self.threshold_accuracy = threshold_accuracy + self.required_accuracy = required_accuracy + + def __call__(self, source: Union[BehavioralAssembly, float], target: Union[list, PropertyAssembly]) -> Score: + """ + :param source: Either a BehavioralAssembly containing model responses to individual stimuli, or a pre-computed + threshold as a float. + :param target: Either a list containing human thresholds (for the ceiling function & ThresholdElevation), + or a PropertyAsssembly. + :return: A Score containing the evaluated model's distance to target thresholds. + """ + # compute threshold from measurements if the input is not a threshold already + if isinstance(source, float): + source_threshold = source + elif isinstance(source, BehavioralAssembly): + source_threshold = self.compute_threshold(source, self._independent_variable) + # check whether the psychometric function fit was successful - if not, return a score of 0 + if source_threshold == 'fit_fail': + score = Score(0.) + score.attrs['error'] = 0. + return score + else: + raise TypeError(f'source is type {type(source)}, but type BehavioralAssembly or float is required.') + return self.scoring_function(source_threshold, target) + + def ceiling(self, assembly: Union[PropertyAssembly, Dict[str, PropertyAssembly]]) -> Score: + """ + Computed by one-vs all for each of the NUM_TRIALS human indexes. One index is removed, and scored against + a pool of the other values. + + Currently copied with modification from 'https://github.com/brain-score/brain-score/blob/ + jacob2020_occlusion_depth_ordering/brainscore/metrics/data_cloud_comparision.py#L54'. + + :param assembly: the human PropertyAssembly containing human responses, or a dict containing the + PropertyAssemblies of the ThresholdElevation metric. 
+ :return: Score object with coords center (ceiling) and error (STD) + """ + human_thresholds: list = assembly.values.tolist() + scores = [] + for i in range(len(human_thresholds)): + random_state = np.random.RandomState(i) + random_human_score = random_state.choice(human_thresholds, replace=False) + metric = Threshold(self._independent_variable, self.fit_function, self.fit_inverse_function, + self.threshold_accuracy) + human_thresholds.remove(random_human_score) + score = metric(random_human_score, human_thresholds) + human_thresholds.append(random_human_score) + scores.append(score.values) + + ceiling, ceiling_error = np.mean(scores), np.std(scores) + ceiling = Score(ceiling) + ceiling.attrs['error'] = ceiling_error + return ceiling + + def compute_threshold(self, source: BehavioralAssembly, independent_variable: str) -> Union[float, str]: + """Converts the source BehavioralAssembly to a threshold float value.""" + assert len(source.values) == len(source[independent_variable].values) + + x_points = source[independent_variable].values + accuracies = self.convert_proba_to_correct(source) + if np.mean(accuracies) < self.required_accuracy: + print('Psychometric threshold fit failure due to low accuracy.') + fit_params = 'fit_fail' + else: + fit_params, measurement_max = self.fit_threshold_function(x_points, accuracies) + if (type(fit_params) == str) and (fit_params == 'fit_fail'): + return fit_params + + if self.threshold_accuracy == 'inflection': + self.threshold_accuracy = self.inflection_accuracy(x_points, fit_params) + + threshold = self.find_threshold(self.threshold_accuracy, fit_params) + + # check whether the fit is outside the measured model responses to discard spurious thresholds + if (threshold > measurement_max) or np.isnan(threshold): + print('Fit fail because threshold is outside of the measured range of responses.') + return 'fit_fail' + return threshold + + def fit_threshold_function(self, x_points: np.array, y_points: np.array) -> Union[np.array, str]: + """ + A function that takes the x and y-points of the measured variable and handles the fitting of the + psychometric threshold function. + + Returns either the fit parameters for self.fit_function or a string tag that indicates the failure + of the psychometric curve fit. + """ + x_points, y_points = self.aggregate_psychometric_fit_data(x_points, y_points) + aggregated_x_points, aggregated_y_points, at_least_third_remaining = self.remove_data_after_asymptote(x_points, + y_points) + measurement_max = np.max(aggregated_x_points) + if not at_least_third_remaining: + # This failure indicates that there is too little data to accurately fit the psychometric function. + print('Psychometric curve fit fail because performance is decreasing with the independent variable.') + return 'fit_fail', measurement_max + + params, r2 = grid_search(aggregated_x_points, aggregated_y_points) + + # if all the fits in the grid search failed, there will be a None value in params. In this case, we reject + # the fit. This typically only ever happens when a model outputs one value for all test images. + if None in params: + params = 'fit_fail' + + # remove fits to random data. This choice is preferred over a chi^2 test since chi^2 discards a lot of fits + # that would be acceptable in a human case. 
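+        # (0.4 is a heuristic cutoff: fits explaining less than 40% of the variance in the
+        # aggregated accuracies are rejected as failed fits.)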
+ if r2 < 0.4: + print('Fit fail due to low fit R^2.') + params = 'fit_fail' + + return params, measurement_max + + def find_threshold(self, threshold_accuracy: float, fit_params: Tuple[float, ...]) -> float: + """ + A function that uses the inverse fit function to find the value of the threshold in terms of + the independent variable (self._independent_variable). + """ + threshold = self.fit_inverse_function(threshold_accuracy, *fit_params) + return threshold + + def inflection_accuracy(self, x_points: np.array, fit_params: np.array) -> float: + """ + A function that finds the accuracy at the inflection point of the fit function. Useful if you do not care + about the specific threshold accuracy, but rather about e.g. the elevation at the inflection point. + """ + max_fit_accuracy = self.fit_function(np.max(x_points), *fit_params) + min_fit_accuracy = self.fit_function(np.min(x_points), *fit_params) + threshold_accuracy = min_fit_accuracy + (max_fit_accuracy - min_fit_accuracy) / 2 + return threshold_accuracy + + @staticmethod + def aggregate_psychometric_fit_data(x_points, y_points): + unique_x = np.unique(x_points) + correct_rate = np.zeros(len(unique_x)) + + for i, x in enumerate(unique_x): + trials = np.sum(x_points == x) + correct_trials = np.sum((x_points == x) & (y_points == 1)) + correct_rate[i] = correct_trials / trials + + return unique_x, correct_rate + + @staticmethod + def scoring_function(source: float, target: Union[list, PropertyAssembly]) -> Score: + """ + Computes the average distance of the source from each of the individual targets in units of the + individual targets. This is generally a more stringent scoring method than pool_score, aimed + to measure the average of the individual target effects. + """ + raw_scores = [] + for target_value in target: + # This score = 0 when the source exceeds target_value by 100% + raw_score = max((1 - ((np.abs(target_value - source)) / target_value)), 0) + raw_scores.append(raw_score) + + scores_mean, scores_std = np.mean(raw_scores), np.std(raw_scores) + score = Score(scores_mean) + score.attrs['raw'] = raw_scores + score.attrs['error'] = scores_std + return score + + @staticmethod + def convert_proba_to_correct(source: BehavioralAssembly) -> np.array: + """Converts the probability values returned by models doing probability tasks to behavioral choices.""" + decisions = np.argmax(source.values, axis=1) + correct = [] + for presentation, decision in enumerate(decisions): + if source['choice'].values[decision] == source['image_label'].values[presentation]: + correct.append(1) + else: + correct.append(0) + return np.array(correct) + + @staticmethod + def remove_data_after_asymptote(x_values, y_values): + """ + A function that removes all data after the point at which all values of the measured variable are 1 standard + deviation less than the maximum. + + This is done to simulate the procedure in which an experimenter fine-tunes the stimuli in a pilot experiment + to the given system (e.g., humans) such that they only measure data in a region within which the psychometric + fit is monotone (as per the function fit assumption). When this assumption is violated, the function fit + is not a valid measure of the underlying performance function. + + There are circumstances in which this behavior is expected (e.g., crowding). When e.g. 
a vernier element's + offset is increased enough, the task may paradoxically become more difficult, as the offset grows large + enough such that the relevant elements do not fall within a spatially relevant window, or group with the + flankers more than with each other due to constant target-flanker distance. + """ + + std_dev = np.std(y_values) + max_y_idx = np.argmax(y_values) + + # initialize the index for the first data point after the maximum y_value + # that deviates from the maximum by at least 1 standard deviation + index_to_remove = None + + # iterate through the y_values after the maximum y_value + for idx, y in enumerate(y_values[max_y_idx + 1:], start=max_y_idx + 1): + # check if all the remaining y_values deviate by at least 1 standard deviation + if all([abs(val - y_values[max_y_idx]) >= std_dev for val in y_values[idx:]]): + index_to_remove = idx + break + pre_remove_length = len(y_values) + # if we found an index to remove, remove the data after that index + if index_to_remove is not None: + x_values = x_values[:index_to_remove] + y_values = y_values[:index_to_remove] + + # check if at least a third of the elements remain. This is done so that we have an adequate amount of data + # to fit a psychometric threshold on. + remaining_fraction = len(y_values) / pre_remove_length + is_at_least_third_remaining = remaining_fraction >= 1 / 3 + + return x_values, y_values, is_at_least_third_remaining + + +class ThresholdElevation(Threshold): + """ + Computes a threshold elevation from two conditions: a baseline condition and a test condition by dividing + the threshold of the test condition by the baseline condition. In other words, + + `threshold_elevation = test_condition_threshold / baseline_condition_threshold`. + """ + def __init__(self, + independent_variable: str, + baseline_condition: str, + test_condition: str, + threshold_accuracy: Union[str, float] = 'inflection', + required_baseline_accuracy: Optional[float] = 0.6, + required_test_accuracy: Optional[float] = 0.6 + ): + """ + :param independent_variable: The independent variable in the benchmark that the threshold is computed + over. + :param baseline_condition: The baseline condition against which threshold elevation is measured. + :param test_condition: The test condition that is used to measure threshold elevation.. + :param threshold_accuracy: The accuracy at which the threshold should be evaluated at. This can be + either a string Literal['inflection'] or a float. When Literal['inflection'] + is used, the function finds the inflection point of the curve and evaluates + the threshold at that level. When a float is used, the function evaluates + the threshold at that level. + :param required_baseline_accuracy: The minimum accuracy required for the psychometric function fit to be + considered for the baseline condition. + :param required_test_accuracy: The minimum accuracy required for the psychometric function fit to be + considered for the test condition. 
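+
+        Illustrative usage (a sketch only; condition names and assemblies are placeholders, and the
+        dictionary passed to `__call__` must be keyed by the same condition names given here):
+
+            metric = ThresholdElevation(independent_variable='vernier_offset_arcsec',
+                                        baseline_condition='vernier_only',
+                                        test_condition='short-2',
+                                        threshold_accuracy='inflection')
+            score = metric({'vernier_only': baseline_assembly, 'short-2': test_assembly},
+                           human_threshold_elevations)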
+ """ + super(ThresholdElevation, self).__init__(independent_variable) + self.baseline_threshold_metric = Threshold(self._independent_variable, + threshold_accuracy=threshold_accuracy, + required_accuracy=required_baseline_accuracy) + self.test_threshold_metric = Threshold(self._independent_variable, + threshold_accuracy=threshold_accuracy, + required_accuracy=required_test_accuracy) + self.baseline_condition = baseline_condition + self.test_condition = test_condition + self.threshold_accuracy = threshold_accuracy + + def __call__(self, + source: Union[float, Dict[str, BehavioralAssembly]], + target: Union[list, PropertyAssembly, Dict[str, PropertyAssembly]] + ) -> Score: + """ + :param source: Either a dictionary containing the BehavioralAssemblies for the test condition and the + baseline condition, or a pre-computed float threshold elevation. If Dict, Dict + keys should be 'condition_assembly' and 'baseline_assembly' respectively. + :param target: Either a dictionary containing the PropertyAssemblies for the test condition and the + baseline condition, or a list of pre-computed threshold elevations. If Dict, Dict + keys should be 'condition_assembly' and 'baseline_assembly' respectively. + :return: A score containing the evaluated model's ceiling-adjusted distance to target threshold elevations. + """ + # check whether source is a threshold elevation already - if not, compute it. + if isinstance(source, float): + raw_source_threshold_elevation = source + elif isinstance(source, Dict): + source_baseline_threshold = self.baseline_threshold_metric.compute_threshold(source[self.baseline_condition], + self._independent_variable) + + # if using the inflection accuracy, get the inflection point from the baseline condition, and use that + # for the test condition. + if self.threshold_accuracy == 'inflection': + self.test_threshold_metric.threshold_accuracy = self.baseline_threshold_metric.threshold_accuracy + source_test_threshold = self.test_threshold_metric.compute_threshold(source[self.test_condition], + self._independent_variable) + if source_baseline_threshold == 'fit_fail' or source_test_threshold == 'fit_fail': + return Score(0.) # psychometric function could not be fit -- this typically means that the model is at chance throughout + raw_source_threshold_elevation = source_test_threshold / source_baseline_threshold + else: + raise TypeError(f'source is type {type(source)}, but type BehavioralAssembly or float is required.') + + # check whether the targets are threshold elevations already - if not, compute them + if isinstance(target, list): + target_threshold_elevations = target + elif isinstance(target, PropertyAssembly): + target_threshold_elevations = target.values.tolist() + elif isinstance(target, Dict): + target_threshold_elevations = self.compute_threshold_elevations(target) + else: + raise TypeError(f'target is type {type(target)}, but type PropertyAssembly or list is required.') + + # compare threshold elevation to target threshold elevations + return self.scoring_function(raw_source_threshold_elevation, target_threshold_elevations) + + def ceiling(self, assemblies: Dict[str, PropertyAssembly]) -> Score: + """ + Computed by one-vs all for each of the NUM_TRIALS human indexes. One index is removed, and scored against + a pool of the other values. + + Currently copied with modification from 'https://github.com/brain-score/brain-score/blob/ + jacob2020_occlusion_depth_ordering/brainscore/metrics/data_cloud_comparision.py#L54'. 
+ """ + human_threshold_elevations = self.compute_threshold_elevations(assemblies) + scores = [] + for i in range(len(human_threshold_elevations)): + random_state = np.random.RandomState(i) + random_human_score = random_state.choice(human_threshold_elevations, replace=False) + metric = ThresholdElevation(self._independent_variable, self.baseline_condition, self.test_condition, + self.threshold_accuracy) + human_threshold_elevations.remove(random_human_score) + score = metric(random_human_score, human_threshold_elevations) + human_threshold_elevations.append(random_human_score) + scores.append(score.values) + + ceiling, ceiling_error = np.mean(scores), np.std(scores) + ceiling = Score(ceiling) + ceiling.attrs['raw'] = scores + ceiling.attrs['error'] = ceiling_error + return ceiling + + @staticmethod + def compute_threshold_elevations(assemblies: Dict[str, PropertyAssembly]) -> List: + """ + Computes the threshold elevations of a baseline condition and a test condition: + + `threshold_elevation = test_condition_threshold / baseline_condition_threshold`. + """ + baseline_assembly = assemblies['baseline_assembly'] + condition_assembly = assemblies['condition_assembly'] + threshold_elevations = [] + for i, baseline_threshold in enumerate(baseline_assembly.values): + condition_threshold = condition_assembly.values[i] + threshold_elevations.append(condition_threshold / baseline_threshold) + return threshold_elevations diff --git a/brainscore_vision/metrics/threshold/test.py b/brainscore_vision/metrics/threshold/test.py new file mode 100644 index 000000000..84f0c7214 --- /dev/null +++ b/brainscore_vision/metrics/threshold/test.py @@ -0,0 +1,71 @@ +from pytest import approx + +from brainio.assemblies import PropertyAssembly +from brainscore_vision import load_metric + + +def test_threshold_score_from_thresholds(): + assembly = _make_threshold_data() + # independent_variable is not used since we compute from thresholds, and do not need to fit them + metric = load_metric('threshold', independent_variable='placeholder') + score = metric(float(assembly.sel(subject='A').values), assembly) + assert score == approx(0.5625) + + +def test_threshold_elevation_score_from_threshold_elevations(): + assembly = _make_threshold_elevation_data() + # independent_variable is not used since we compute from thresholds, and do not need to fit them + metric = load_metric('threshold_elevation', + independent_variable='placeholder', + baseline_condition='placeholder', + test_condition='placeholder') + score = metric(float(assembly.sel(subject='A').values), assembly) + assert score == approx(0.525) + + +def test_threshold_has_error(): + assembly = _make_threshold_data() + metric = load_metric('threshold', independent_variable='placeholder') + score = metric(float(assembly.sel(subject='A').values), assembly) + assert hasattr(score, 'error') + + +def test_threshold_elevation_has_error(): + assembly = _make_threshold_elevation_data() + metric = load_metric('threshold_elevation', + independent_variable='placeholder', + baseline_condition='placeholder', + test_condition='placeholder') + score = metric(float(assembly.sel(subject='A').values), assembly) + assert hasattr(score, 'error') + + +def test_threshold_has_raw(): + assembly = _make_threshold_data() + metric = load_metric('threshold', independent_variable='placeholder') + score = metric(float(assembly.sel(subject='A').values), assembly) + assert hasattr(score, 'raw') + + +def test_threshold_elevation_has_raw(): + assembly = _make_threshold_elevation_data() + metric = 
load_metric('threshold_elevation', + independent_variable='placeholder', + baseline_condition='placeholder', + test_condition='placeholder') + score = metric(float(assembly.sel(subject='A').values), assembly) + assert hasattr(score, 'raw') + + +def _make_threshold_data(): + # Subjects have thresholds of 10, 20, 40, and 20 respectively. + return PropertyAssembly([10.0, 20.0, 40.0, 20.0], + coords={'subject': ('subject', ['A', 'B', 'C', 'D'])}, + dims=['subject']) + + +def _make_threshold_elevation_data(): + # Subjects have threshold elevations of 3, 2, 1.5, and 5 respectively. + return PropertyAssembly([3.0, 2.0, 1.5, 5.0], + coords={'subject': ('subject', ['A', 'B', 'C', 'D'])}, + dims=['subject']) From bca9905dbf6e3c51dbe4318b7d27201a55261d53 Mon Sep 17 00:00:00 2001 From: Martin Schrimpf Date: Sat, 6 Jul 2024 09:40:12 +0200 Subject: [PATCH 40/68] remove unnecessary (and breaking) import (#1011) --- brainscore_vision/metrics/threshold/metric.py | 1 - 1 file changed, 1 deletion(-) diff --git a/brainscore_vision/metrics/threshold/metric.py b/brainscore_vision/metrics/threshold/metric.py index 2b04270e3..8c11d23df 100644 --- a/brainscore_vision/metrics/threshold/metric.py +++ b/brainscore_vision/metrics/threshold/metric.py @@ -4,7 +4,6 @@ from scipy.optimize import minimize from scipy.stats import norm from sklearn.metrics import r2_score -import matplotlib.pyplot as plt from brainscore_core.metrics import Metric, Score from brainio.assemblies import PropertyAssembly, BehavioralAssembly From 2a13317758a4ec36566e964e1712217fdf8583dd Mon Sep 17 00:00:00 2001 From: Michael Ferguson Date: Sat, 6 Jul 2024 05:22:40 -0400 Subject: [PATCH 41/68] Add bracci2019 benchmark (competition) (#967) * add bracci2019 benchmark * git comments * benchmark now produces scores --- .../benchmarks/bracci2019/__init__.py | 8 + .../benchmarks/bracci2019/benchmark.py | 284 ++++++++++++++++++ .../benchmarks/bracci2019/requirements.txt | 3 + brainscore_vision/data/bracci2019/__init__.py | 36 +++ .../data/bracci2019/data_packaging.py | 221 ++++++++++++++ brainscore_vision/data/bracci2019/test.py | 16 + 6 files changed, 568 insertions(+) create mode 100644 brainscore_vision/benchmarks/bracci2019/__init__.py create mode 100644 brainscore_vision/benchmarks/bracci2019/benchmark.py create mode 100644 brainscore_vision/benchmarks/bracci2019/requirements.txt create mode 100644 brainscore_vision/data/bracci2019/__init__.py create mode 100644 brainscore_vision/data/bracci2019/data_packaging.py create mode 100644 brainscore_vision/data/bracci2019/test.py diff --git a/brainscore_vision/benchmarks/bracci2019/__init__.py b/brainscore_vision/benchmarks/bracci2019/__init__.py new file mode 100644 index 000000000..f20938672 --- /dev/null +++ b/brainscore_vision/benchmarks/bracci2019/__init__.py @@ -0,0 +1,8 @@ +from brainscore_vision import benchmark_registry +from .benchmark import Bracci2019RSA + +# benchmark_registry['Bracci2019_RSA-V1'] = lambda: _Bracci2019RSA("V1") +# benchmark_registry['Bracci2019_RSA-posteriorVTC'] = lambda: _Bracci2019RSA("posteriorVTC") +benchmark_registry['Bracci2019.anteriorVTC-rdm'] = lambda: Bracci2019RSA("anteriorVTC") + + diff --git a/brainscore_vision/benchmarks/bracci2019/benchmark.py b/brainscore_vision/benchmarks/bracci2019/benchmark.py new file mode 100644 index 000000000..3d960a495 --- /dev/null +++ b/brainscore_vision/benchmarks/bracci2019/benchmark.py @@ -0,0 +1,284 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Jun 21 22:18:07 2024 + +@author: costantino_ai +""" + 
+import xarray as xr +import numpy as np +from scipy.stats import spearmanr +from brainscore_vision.benchmarks import BenchmarkBase +from brainscore_vision.benchmark_helpers.screen import place_on_screen +from brainscore_vision.model_interface import BrainModel +from brainscore_vision import load_stimulus_set, load_metric, load_dataset +from brainscore_vision.utils import LazyLoad +from brainio.assemblies import NeuroidAssembly +from brainscore_core.metrics import Score + + +BIBTEX = """@article{bracci2019ventral, + title={The ventral visual pathway represents animal appearance over animacy, unlike human behavior and deep neural networks}, + author={Bracci, Stefania and Ritchie, J Brendan and Kalfas, Ioannis and de Beeck, Hans P Op}, + journal={Journal of Neuroscience}, + volume={39}, + number={33}, + pages={6513--6525}, + year={2019}, + publisher={Soc Neuroscience} +}""" + +TIME_BIN_ST, TIME_BIN_END = ( + 70, + 170, +) # standard core object recognition response, following Majaj*, Hong*, et al. 2015 + + +class _Bracci2019RSA(BenchmarkBase): + """ + A benchmark class to measure the similarity between model-generated confusion probabilities + and human confusion data in visual tasks, specifically designed for the Maniquet2024 dataset. + + Attributes: + _metric (ConfusionSimilarity): The metric used to compare model outputs with human data. + _fitting_stimuli (StimulusSet): Stimulus set used for training or fitting the model. + _stimulus_set (StimulusSet): Stimulus set used for testing the model. + _human_assembly (DataAssembly): Human behavioral data for comparison. + _visual_degrees (int): The size of stimuli in visual degrees as presented to humans. + _number_of_trials (int): Number of trials to average over for the model predictions. + """ + + def __init__(self, region): + """ + Initializes the benchmark by setting up the necessary parameters. + """ + + # Initialize the metric for evaluating confusion similarity + self._metric = load_metric("rdm") + + # # Load testing stimuli from the stimulus set registry + # self._stimulus_set = stimulus_set_registry["Bracci2019"]() + + # # Load human behavioral data from the data registry + # self._human_assembly = data_registry["Bracci2019"]() + + # Load testing stimuli from the stimulus set registry + self._stimulus_set = load_stimulus_set('Bracci2019') + + # Load human behavioral data from the data registry + self._human_assembly = load_dataset('Bracci2019') + + # Set the visual degrees to which the human data was exposed + self._visual_degrees = 8 + + # Set the number of trials to perform + self._number_of_trials = 1 + + # Set the region to record from + self._region = region + + # Define a mapping + self._roi_map = { + "V1": (0, "V1"), + "posteriorVTC": (1, "IT"), + "anteriorVTC": (2, "IT"), + } + + assert ( + self._region in self._roi_map + ), "The ROI to compare must either ['V1', 'posteriorVTC', or 'anteriorVTC']" + + # Call the parent class constructor to complete initialization + super(_Bracci2019RSA, self).__init__( + identifier=f"Bracci2019.{region}-rdm", + version=1, + # ceiling_func=lambda: self._metric._ceiling(self._assembly), + ceiling_func=lambda: 1, + parent="Bracci2019", + bibtex=BIBTEX, + ) + + def _center_data_by_subject(self, roi_assembly): + """ + Center the data by subject by subtracting the mean across all conditions for each voxel. + + Args: + roi_assembly (DataArray): The input data array with regions of interest and subject identifiers. + + Returns: + DataArray: The ROI data array with centered data by subject. 
+ """ + # Get unique list of subjects in the data + subjects = np.unique(roi_assembly["subject"]) + centered_data_list = [] + + for subject in subjects: + # Select data for the current subject + subject_data = roi_assembly.sel(neuroid=roi_assembly["subject"] == subject) + # Center the data by subtracting the mean across presentations + subject_centered_data = subject_data - subject_data.mean(dim="presentation") + # Append the centered data for this subject to the list + centered_data_list.append(subject_centered_data) + + # Concatenate all the subject-centered data back into a single assembly + full_centered_assembly = xr.concat(centered_data_list, dim="neuroid") + + return full_centered_assembly + + def _get_human_ceiling(self, roi_centered_assembly): + """ + Calculate the "lower bound" ceiling for human performance based on inter-subject correlations of RDMs. + + Args: + roi_centered_assembly (DataArray): The ROI data array with subject-centered data. + + Returns: + float: The average inter-subject correlation, representing the human performance ceiling. + """ + # Get unique list of subjects in the data + subjects = np.unique(roi_centered_assembly["subject"]) + correlations = [] + + for subject in subjects: + # Generate RDM for the current subject + subject_rdm = self._metric._rdm( + roi_centered_assembly.sel( + neuroid=roi_centered_assembly["subject"] == subject + ) + ).values + + # Generate RDMs for other subjects + other_rdms = [ + self._metric._rdm( + roi_centered_assembly.sel( + neuroid=roi_centered_assembly["subject"] == other_subject + ) + ).values + for other_subject in subjects + if other_subject != subject + ] + + # Compute the average RDM from other subjects + average_rdm = np.mean(other_rdms, axis=0) + + # Extract the lower triangle of the RDM matrix + mask = np.tril(np.ones_like(subject_rdm), -1).astype(bool) + vector_lt_subject = subject_rdm[mask] + vector_lt_average = average_rdm[mask] + + # Compute the Spearman correlation + correlation, _ = spearmanr(vector_lt_subject, vector_lt_average) + + # Append the correlation result + correlations.append(correlation) + + # Compute the average correlation across all subjects + human_ceiling = np.mean(correlations) + + return float(human_ceiling) + + def _average_voxels_across_subjects(self, roi_centered_assembly): + """ + Compute the mean across the 'voxels' dimension to create a new NeuroidAssembly with averaged neuroid data. + + This function averages voxel data within the input neuroid assembly across all subjects and retains important metadata + about presentations and neuroids. + + Args: + roi_centered_assembly (xarray.DataArray): The input data array with regions of interest, voxels, and subject data. + + Returns: + NeuroidAssembly: A new assembly object with averaged neuroid data and associated coordinates. 
+ """ + # Average voxel data across subjects to simplify the neuroid representation + averaged_assy = roi_centered_assembly.groupby("voxels").mean() + averaged_data = averaged_assy.values + + # Construct the NeuroidAssembly with the averaged data and maintain the presentation metadata + assembly = NeuroidAssembly( + averaged_data, + dims=["presentation", "neuroid"], + coords={ + "stimulus_id": ( + "presentation", + roi_centered_assembly["stimulus_id"].values, + ), + "stimulus_name": ( + "presentation", + roi_centered_assembly["stimulus_name"].values, + ), + "exemplar_number": ( + "presentation", + roi_centered_assembly["exemplar_number"].values, + ), + "image_label": ( + "presentation", + roi_centered_assembly["image_label"].values, + ), + "image_group": ( + "presentation", + roi_centered_assembly["image_group"].values, + ), + "voxels": ("neuroid", list(range(averaged_data.shape[-1]))), + }, + ) + return assembly + + def __call__(self, candidate: BrainModel): + """ + + Args: + candidate (BrainModel): The model being evaluated. + + Returns: + float: The similarity score between the model and human data. + """ + # Start the model on the task of predicting confusion probabilities + candidate.start_recording( + self._roi_map[self._region][1], [(TIME_BIN_ST, TIME_BIN_END)] + ) + + # Prepare the stimulus set by placing it on a virtual screen at a scale appropriate for the model + stimulus_set = place_on_screen( + self._stimulus_set, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees, + ) + + # Model looks at the stimulus set + dnn_assembly = candidate.look_at( + stimulus_set, number_of_trials=self._number_of_trials + ) + + # Get the human data + human_data = self._human_assembly + + # Select only the data for the current ROI + roi_idx = self._roi_map[self._region][0] + roi_assembly = human_data.sel(neuroid=human_data["roi"] == roi_idx) + + # Center data (by subject, across all conditions) + roi_centered_assembly = self._center_data_by_subject(roi_assembly) + + # Average voxels across subjects + human_averaged_assembly = self._average_voxels_across_subjects( + roi_centered_assembly + ) + + # Calculate the human ceiling + ceiling = self._get_human_ceiling(roi_centered_assembly) + + # Compare (corr) the two RDMs to get the score + similarity = self._metric(dnn_assembly, human_averaged_assembly) + + # Normalize by ceiling + score = Score(similarity / ceiling) + score.attrs["raw"] = similarity + score.attrs["ceiling"] = ceiling + + return score + + +def Bracci2019RSA(region): + return _Bracci2019RSA(region) diff --git a/brainscore_vision/benchmarks/bracci2019/requirements.txt b/brainscore_vision/benchmarks/bracci2019/requirements.txt new file mode 100644 index 000000000..51fc28119 --- /dev/null +++ b/brainscore_vision/benchmarks/bracci2019/requirements.txt @@ -0,0 +1,3 @@ +scipy +xarray +numpy \ No newline at end of file diff --git a/brainscore_vision/data/bracci2019/__init__.py b/brainscore_vision/data/bracci2019/__init__.py new file mode 100644 index 000000000..fe1653497 --- /dev/null +++ b/brainscore_vision/data/bracci2019/__init__.py @@ -0,0 +1,36 @@ +from brainio.assemblies import NeuroidAssembly +from brainscore_vision import stimulus_set_registry, data_registry +from brainscore_vision.data_helpers.s3 import ( + load_assembly_from_s3, + load_stimulus_set_from_s3, +) + +BIBTEX = """"@article{bracci2019ventral, + title={The ventral visual pathway represents animal appearance over animacy, unlike human behavior and deep neural networks}, + 
author={Bracci, Stefania and Ritchie, J Brendan and Kalfas, Ioannis and de Beeck, Hans P Op}, + journal={Journal of Neuroscience}, + volume={39}, + number={33}, + pages={6513--6525}, + year={2019}, + publisher={Soc Neuroscience} +}""" + +# Human Stimulus Set +stimulus_set_registry["Bracci2019"] = lambda: load_stimulus_set_from_s3( + identifier="Bracci2019", + bucket="brainio-brainscore", + csv_sha1="05b1af9b8e6ed478ea49339e11b0024c2da8c35f", + zip_sha1="a79b249e758421f46ec781301cd4b498f64853ce", + csv_version_id="0B6H2003B.aOml25nlWsxSTkQib1ndZP", + zip_version_id="DCqvGpnGL7J0JbQ6T6uAax5M5jbe_O8Q", +) + +# Human Data Assembly (brain) +data_registry["Bracci2019"] = lambda: load_assembly_from_s3( + identifier="Bracci2019", + version_id="0w_eqBDiwxlSkuqSGK0sqUz1YhsgvY3C", + sha1="cbec165bb20f09d0527fddba7cfbf115a396a2f3", + bucket="brainio-brainscore", + cls=NeuroidAssembly, +) diff --git a/brainscore_vision/data/bracci2019/data_packaging.py b/brainscore_vision/data/bracci2019/data_packaging.py new file mode 100644 index 000000000..588415cde --- /dev/null +++ b/brainscore_vision/data/bracci2019/data_packaging.py @@ -0,0 +1,221 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Jun 21 18:51:41 2024 + +@author: costantino_ai +""" + +# Imports necessary libraries +import os +import logging +from pathlib import Path +import numpy as np +from scipy.io import loadmat + +from brainio.assemblies import NeuroidAssembly +from brainio.stimuli import StimulusSet +from brainio.packaging import package_data_assembly, package_stimulus_set +from natsort import natsorted + +# Setup logging +logging.basicConfig( + level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" +) + +# Constants +ROOT_DIRECTORY = "./bracci2019/private" +TAG = "Bracci2019" + + +# Function to categorize stimulus based on its ID +def categorize_stimulus(stim_id): + if stim_id < 10: + return "lookalike" + elif 10 <= stim_id <= 18: + return "animal" + elif 19 <= stim_id <= 27: + return "object" + else: + raise ValueError("Stimulus ID is out of the expected range") + + +def categorize_group(stim_id): + if stim_id % 9 == 0: + return 9 + return stim_id % 9 + + +def load_stimulus_set(stimuli_directory, tag): + """ + Scans a specified directory for JPEG images and organizes their metadata into a structured format. + + This function navigates to a directory named 'stimuli' within a given root directory, + extracts metadata from the filenames, and categorizes each image based on predefined + criteria. The metadata includes stimulus IDs, categories, and group affiliations, and + is returned as a StimulusSet object. + + :param root: The root directory where the 'stimuli' directory is located. + :type root: str + :return: A StimulusSet object containing metadata and mappings for each stimulus. 
+ :rtype: StimulusSet + """ + + # Initialize lists to collect metadata and mapping of stimulus IDs to file paths + stimuli = [] + stimulus_paths = {} + + # Iterate over each image file in the stimuli directory + for filepath in natsorted(Path(stimuli_directory).glob("*.jpg")): + stimulus_id = filepath.stem + _, stim_id_str = stimulus_id.split("_") + stim_id = int(stim_id_str) + + # Maps each stimulus ID to its file path + stimulus_paths[stimulus_id] = filepath + + # Collects metadata for each stimulus + stimuli.append( + { + "stimulus_id": stimulus_id, + "stimulus_name": f"{stim_id:02d}_{categorize_group(stim_id):02d}_{categorize_stimulus(stim_id)}", + "exemplar_number": stim_id, + "image_label": categorize_stimulus(stim_id), + "image_group": categorize_group(stim_id), + } + ) + + # Convert the list of stimuli into a StimulusSet object + stimulus_set = StimulusSet(stimuli) + stimulus_set.stimulus_paths = stimulus_paths + stimulus_set.name = tag + assert len(stimulus_set) == 27 + logging.info("Total stimuli loaded: %d", len(stimulus_set)) + + return stimulus_set + + +def load_brain_data(mat_file_path, stimulus_set, assembly_name): + """ + Load brain imaging data from a MATLAB file and organize it into a structured data assembly. + + This function retrieves brain activity data segmented by regions of interest (ROIs) from a specified MATLAB file. + It concatenates data across three predefined ROIs (v1, postVTC, antVTC) for each subject and maps it to the + corresponding stimulus metadata. The resulting NeuroidAssembly object is structured to facilitate further analysis. + + Parameters: + - mat_file_path (str): Full path to the MATLAB '.mat' file containing the brain data. + - stimulus_set (pd.DataFrame): DataFrame containing stimulus metadata. + - assembly_name (str): Name to assign to the resulting NeuroidAssembly for identification. + + Returns: + - NeuroidAssembly: An object containing the organized brain data, along with metadata about stimuli, subjects, and ROIs. + + Raises: + - FileNotFoundError: If the specified '.mat' file does not exist. + - AssertionError: To ensure the stimulus IDs in the data assembly match those in the provided stimulus set. 
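+
+    Example (a sketch mirroring the calls in main() below; the hard-coded paths assume the
+    private ./bracci2019/private data layout used throughout this script):
+        stimulus_set = load_stimulus_set("./bracci2019/private/stimuli", "Bracci2019")
+        assembly = load_brain_data("./bracci2019/private/data/lookalike.mat", stimulus_set, "Bracci2019")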
+ """ + + # Load the MATLAB file + mat_file = loadmat(mat_file_path) + + # Extract brain data for each ROI + v1_data = mat_file["lookalike"]["data"][0][0][0][0][0] + postVTC_data = mat_file["lookalike"]["data"][0][0][0][0][1] + antVTC_data = mat_file["lookalike"]["data"][0][0][0][0][2] + + # Log the shape of data from each ROI for debugging + print( + f"V1 data shape: {v1_data.shape}, PostVTC data shape: {postVTC_data.shape}, AntVTC data shape: {antVTC_data.shape}" + ) + + # Concatenate data across the voxel dimension for each subject + concatenated_data = np.concatenate([v1_data, postVTC_data, antVTC_data], axis=1) + + # Calculate number of conditions and subjects from one of the ROIs + n_conditions, total_voxels, n_subjects = concatenated_data.shape + + # Initialize arrays to store indices + subject_indices = [] + roi_indices = [] + voxel_indices = [] + + # Generate indices arrays for ROI, subjects, and voxels + for subject in range(n_subjects): + total_voxels_per_subject = 0 + for roi, roi_data in enumerate([v1_data, postVTC_data, antVTC_data]): + n_voxels = roi_data.shape[1] + roi_indices.extend([roi] * n_voxels) + subject_indices.extend([subject] * n_voxels) + total_voxels_per_subject += n_voxels + + voxel_indices.extend(list(range(total_voxels_per_subject))) + + # Flatten the concatenated data into two dimensions + flattened_data = concatenated_data.reshape(n_conditions, -1) + + # Convert index lists to numpy arrays for efficient data handling + subject_indices = np.array(subject_indices) + roi_indices = np.array(roi_indices) + + # Create the NeuroidAssembly object with corresponding dimensions and coordinates + assembly = NeuroidAssembly( + flattened_data, + dims=["presentation", "neuroid"], + coords={ + "stimulus_id": ("presentation", stimulus_set["stimulus_id"].values), + "stimulus_name": ("presentation", stimulus_set["stimulus_name"].values), + "exemplar_number": ("presentation", stimulus_set["exemplar_number"].values), + "image_label": ("presentation", stimulus_set["image_label"].values), + "image_group": ("presentation", stimulus_set["image_group"].values), + "roi": ("neuroid", roi_indices), + "subject": ("neuroid", subject_indices), + "voxels": ("neuroid", voxel_indices), + }, + ) + + # Assign a name to the data assembly + assembly.name = assembly_name + + # Ensure the assembly's stimulus IDs match those provided in the stimulus set + assert np.array_equal( + assembly["stimulus_id"].values, stimulus_set["stimulus_id"].values + ), "Stimulus IDs do not match." + + return assembly + + +def main(): + """ + Main function to package stimulus set and experimental data, and upload to S3. 
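+
+    Intended to be run directly (python data_packaging.py) from a working directory that contains
+    the private bracci2019 folder; uploading additionally assumes write access to the
+    "brainio-brainscore" bucket.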
+ """ + logging.info("Starting the data packaging process.") + + # Load stimuli from directory + human_stimuli_directory = os.path.join(ROOT_DIRECTORY, "stimuli") + human_stimulus_set = load_stimulus_set(human_stimuli_directory, TAG) + + # Upload stimuli + human_stimulus_meta = package_stimulus_set( + None, human_stimulus_set, human_stimulus_set.name, "brainio-brainscore" + ) + + # Load human data assembly + brain_data_path = os.path.join(ROOT_DIRECTORY, "data/lookalike.mat") + data_assembly = load_brain_data(brain_data_path, human_stimulus_set, TAG) + + assembly_meta = package_data_assembly( + None, + data_assembly, + data_assembly.name, + human_stimulus_set.name, + "NeuroidAssembly", + "brainio-brainscore", + ) + + print(human_stimulus_meta) + print(assembly_meta) + + +if __name__ == "__main__": + main() diff --git a/brainscore_vision/data/bracci2019/test.py b/brainscore_vision/data/bracci2019/test.py new file mode 100644 index 000000000..3e78d093f --- /dev/null +++ b/brainscore_vision/data/bracci2019/test.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Jun 20 17:32:25 2024 + +@author: costantino_ai +""" +import pytest +from brainscore_vision import load_dataset, load_stimulus_set + +@pytest.mark.private_access +def test_existence(): + assert load_dataset('Bracci2019') is not None + assert load_stimulus_set('Bracci2019') is not None + + \ No newline at end of file From 74db9a30a9f4ecae389dee4d72029db8d5cc979a Mon Sep 17 00:00:00 2001 From: Michael Ferguson Date: Sat, 6 Jul 2024 05:24:53 -0400 Subject: [PATCH 42/68] Add BMD2024 benchmark (competition) (#1004) * first commit of data * first commit of benchmark * benchmark produces scores * remove stray .nc file * follow naming convention for metric * fix stimulus_id reference * rename `BMD_2024_` -> `BMD2024.` * place stimuli on screen --------- Co-authored-by: Martin Schrimpf Co-authored-by: Martin Schrimpf --- .../benchmarks/bmd2024/__init__.py | 8 + .../benchmarks/bmd2024/benchmark.py | 50 +++ brainscore_vision/benchmarks/bmd2024/test.py | 29 ++ brainscore_vision/data/bmd2024/__init__.py | 69 +++ .../data_packaging/BMD_2024_data_assembly.py | 91 ++++ .../data_packaging/BMD_2024_simulus_set.py | 48 +++ .../data/bmd2024/data_packaging/stim_meta.csv | 401 ++++++++++++++++++ brainscore_vision/data/bmd2024/test.py | 130 ++++++ 8 files changed, 826 insertions(+) create mode 100644 brainscore_vision/benchmarks/bmd2024/__init__.py create mode 100644 brainscore_vision/benchmarks/bmd2024/benchmark.py create mode 100644 brainscore_vision/benchmarks/bmd2024/test.py create mode 100644 brainscore_vision/data/bmd2024/__init__.py create mode 100644 brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py create mode 100644 brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py create mode 100644 brainscore_vision/data/bmd2024/data_packaging/stim_meta.csv create mode 100644 brainscore_vision/data/bmd2024/test.py diff --git a/brainscore_vision/benchmarks/bmd2024/__init__.py b/brainscore_vision/benchmarks/bmd2024/__init__.py new file mode 100644 index 000000000..1cea4b7a6 --- /dev/null +++ b/brainscore_vision/benchmarks/bmd2024/__init__.py @@ -0,0 +1,8 @@ +from brainscore_vision import benchmark_registry +from brainscore_vision.benchmarks.bmd2024.benchmark import BMD2024AccuracyDistance + +# behavioral benchmarks +benchmark_registry['BMD2024.texture_1Behavioral-accuracy_distance'] = lambda: BMD2024AccuracyDistance('texture_1') 
+benchmark_registry['BMD2024.texture_2Behavioral-accuracy_distance'] = lambda: BMD2024AccuracyDistance('texture_2') +benchmark_registry['BMD2024.dotted_1Behavioral-accuracy_distance'] = lambda: BMD2024AccuracyDistance('dotted_1') +benchmark_registry['BMD2024.dotted_2Behavioral-accuracy_distance'] = lambda: BMD2024AccuracyDistance('dotted_2') diff --git a/brainscore_vision/benchmarks/bmd2024/benchmark.py b/brainscore_vision/benchmarks/bmd2024/benchmark.py new file mode 100644 index 000000000..93009304a --- /dev/null +++ b/brainscore_vision/benchmarks/bmd2024/benchmark.py @@ -0,0 +1,50 @@ +import numpy as np + +from brainio.assemblies import BehavioralAssembly +from brainscore_vision import load_dataset, load_metric +from brainscore_vision.benchmark_helpers.screen import place_on_screen +from brainscore_vision.benchmarks import BenchmarkBase +from brainscore_vision.metrics import Score +from brainscore_vision.model_interface import BrainModel +from brainscore_vision.utils import LazyLoad + +BIBTEX = "" # to appear in a future article + + +class _BMD_2024_BehavioralAccuracyDistance(BenchmarkBase): + # behavioral benchmark + def __init__(self, dataset): + self._metric = load_metric('accuracy_distance') + self._assembly = LazyLoad(lambda: load_assembly(dataset)) + super(_BMD_2024_BehavioralAccuracyDistance, self).__init__( + identifier=f'BMD2024.{dataset}Behavioral-accuracy_distance', version=1, + ceiling_func=lambda: self._metric.ceiling(self._assembly), + parent='BMD2024', + bibtex=BIBTEX) + + def __call__(self, candidate: BrainModel): + choice_labels = set(self._assembly.stimulus_set['truth'].values) + choice_labels = list(sorted(choice_labels)) + candidate.start_task(BrainModel.Task.label, choice_labels) + stimulus_set = place_on_screen(self._assembly.stimulus_set, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees) + labels = candidate.look_at(stimulus_set, number_of_trials=1) + raw_score = self._metric(labels, self._assembly) + ceiling = self.ceiling + score = raw_score / ceiling + # ensure score <= 1.0 + if score.values > 1: + score = Score(np.array(1.)) + score.attrs['raw'] = raw_score + score.attrs['ceiling'] = ceiling + return score + + +def load_assembly(dataset: str) -> BehavioralAssembly: + assembly = load_dataset(f'BMD2024.{dataset}') + return assembly + + +def BMD2024AccuracyDistance(experiment): + return _BMD_2024_BehavioralAccuracyDistance(experiment) diff --git a/brainscore_vision/benchmarks/bmd2024/test.py b/brainscore_vision/benchmarks/bmd2024/test.py new file mode 100644 index 000000000..55b3e1bc6 --- /dev/null +++ b/brainscore_vision/benchmarks/bmd2024/test.py @@ -0,0 +1,29 @@ +import pytest +from pytest import approx + +from brainscore_vision import benchmark_registry, load_benchmark + + +@pytest.mark.parametrize('benchmark', [ + 'BMD2024.texture_1Behavioral-accuracy_distance', + 'BMD2024.texture_2Behavioral-accuracy_distance', + 'BMD2024.dotted_1Behavioral-accuracy_distance', + 'BMD2024.dotted_2Behavioral-accuracy_distance', +]) +def test_benchmark_registry(benchmark): + assert benchmark in benchmark_registry + + +class TestBehavioral: + @pytest.mark.private_access + @pytest.mark.parametrize('dataset, expected_ceiling', [ + ('texture_1', approx(0.98283, abs=0.001)), + ('texture_2', approx(0.97337, abs=0.001)), + ('dotted_1', approx(0.97837, abs=0.001)), + ('dotted_2', approx(0.93071, abs=0.001)), # all of the above are AccuracyDistance + ]) + def test_dataset_ceiling(self, dataset, expected_ceiling): + benchmark = 
f"BMD2024.{dataset}Behavioral-accuracy_distance" + benchmark = load_benchmark(benchmark) + ceiling = benchmark.ceiling + assert ceiling == expected_ceiling diff --git a/brainscore_vision/data/bmd2024/__init__.py b/brainscore_vision/data/bmd2024/__init__.py new file mode 100644 index 000000000..69d27810b --- /dev/null +++ b/brainscore_vision/data/bmd2024/__init__.py @@ -0,0 +1,69 @@ +from brainio.assemblies import BehavioralAssembly + +from brainscore_vision import data_registry, stimulus_set_registry, load_stimulus_set +from brainscore_vision.data_helpers.s3 import load_assembly_from_s3, load_stimulus_set_from_s3 + +data_registry['BMD2024.texture_1'] = lambda: load_assembly_from_s3( + identifier='BMD_2024_texture_1', + version_id='0WF1iCvgHZGDrCrkigGCfUy3iGTGS69M', + sha1='050cef2bd38fe0e0c6d55c9a4ba0b1c57550a072', + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('BMD2024.texture_1')) + +data_registry['BMD2024.texture_2'] = lambda: load_assembly_from_s3( + identifier='BMD_2024_texture_2', + version_id='Exn4klT3qUYC6aQIanUox7MSjgHLn.t_', + sha1='1f9f4ee938df509c0cbeaec7fdfe0f40997da331', + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('BMD2024.texture_2')) + +data_registry['BMD2024.dotted_1'] = lambda: load_assembly_from_s3( + identifier='BMD_2024_dotted_1', + version_id='tMtSvkgphUn_C9a1WtoAWHmcgrA9oH5o', + sha1='eb16feffe392087b4c40ef249850825f702e7911', + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('BMD2024.dotted_1')) + +data_registry['BMD2024.dotted_2'] = lambda: load_assembly_from_s3( + identifier='BMD_2024_dotted_2', + version_id='JF_vla60eynLMfgqCyjjkn_u8Bb.HQOV', + sha1='297833a094513b99ae434e581df09ac64cd6582f', + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('BMD2024.dotted_2')) + + +stimulus_set_registry['BMD2024.texture_1'] = lambda: load_stimulus_set_from_s3( + identifier='BMD_2024_texture_1', + bucket="brainio-brainscore", + csv_sha1='395911b2933d675b98dda7bae422f11648d8e86d', + zip_sha1='cfde36c93dc9070ef5dfaa0a992c9d2420af3460', + csv_version_id='2815L7UhBfYYCyZ7MgNdPsOXXcRLT1YZ', + zip_version_id='EelSX0uvmm.B6LzTPtQgWbrauUmPHmjl') + +stimulus_set_registry['BMD2024.texture_2'] = lambda: load_stimulus_set_from_s3( + identifier='BMD_2024_texture_2', + bucket="brainio-brainscore", + csv_sha1='98ff8e3a1ca6f632ebc2daa909804314bc1b7e31', + zip_sha1='31c9d8449b25da8ad3cb034eee04db9193027fcb', + csv_version_id='gFqcLlA6xcmv1rpUoADKNsgyNYSV.G6T', + zip_version_id='c6B88cKdrD7YmUA1BXJoUeCMmAUbFzZw') + +stimulus_set_registry['BMD2024.dotted_1'] = lambda: load_stimulus_set_from_s3( + identifier='BMD_2024_dotted_1', + bucket="brainio-brainscore", + csv_sha1='de4214666237a0be39810ec4fefd6ec8d2a2e881', + zip_sha1='b4ab1355665b5bf3bf81b7aa6eccfd396c96bda2', + csv_version_id='8OuzqjMhNuFDu41iEjGPeNBz4I1kxlCV', + zip_version_id='ue19A9hVTDzTKaXkfnJCrKaes.JLTLtv') + +stimulus_set_registry['BMD2024.dotted_2'] = lambda: load_stimulus_set_from_s3( + identifier='BMD_2024_dotted_2', + bucket="brainio-brainscore", + csv_sha1='4555daa5257dee10c6c6a5625d3bb2d94452e294', + zip_sha1='20337c1fac66ed0eec16410c6801cca830e6c20c', + csv_version_id='l4E.CZVizNrwprRZHIjHEppBQK8LpBHM', + zip_version_id='.a1HOsF9Q3uwfxY5NHZf2XNuj6brdiTK') \ No newline at end of file diff --git a/brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py 
b/brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py
new file mode 100644
index 000000000..bcdd4dcc6
--- /dev/null
+++ b/brainscore_vision/data/bmd2024/data_packaging/BMD_2024_data_assembly.py
@@ -0,0 +1,91 @@
+from pathlib import Path
+
+import pandas as pd
+
+from brainio.assemblies import BehavioralAssembly
+from brainio.packaging import package_data_assembly
+
+'''
+Experiment information
+
+- 4 experimental conditions
+    - Texturized stimuli 1 --> 'texture_1'
+    - Texturized stimuli 2 --> 'texture_2'
+    - Dotted outlines 1 --> 'dotted_1'
+    - Dotted outlines 2 --> 'dotted_2'
+
+Texture stimuli are silhouettes filled with a repeating character that produces the texture.
+texture_1 --> character is "3"
+texture_2 --> character is "%"
+
+Dotted outlines can have dots at various distances and the two conditions correspond to a smaller
+and a larger distance.
+
+There were 10 images per condition for each of the 10 categories
+- 'airplane'
+- 'bear'
+- 'bicycle'
+- 'bird'
+- 'bottle'
+- 'car'
+- 'cat'
+- 'chair'
+- 'elephant'
+- 'knife'
+
+Between 51 and 54 participants completed each of the conditions (a total of 211 participants).
+'''
+
+CONDITIONS = ['texture_1', 'texture_2', 'dotted_1', 'dotted_2']
+
+def collect_BMD2024_behavioral_assembly(data_path, condition):
+    data = pd.read_csv(data_path)
+    filtered_data = data[data['condition'] == condition]
+
+    assembly = BehavioralAssembly(filtered_data['subject_answer'],
+                                  coords={
+                                      'subject': ('presentation', filtered_data['subject']),
+                                      'subject_answer': ('presentation', filtered_data['subject_answer']),
+                                      'stimulus_id': ('presentation', filtered_data['stimulus_id']),
+                                      'truth': ('presentation', filtered_data['truth']),
+                                      'condition': ('presentation', filtered_data['condition'])
+                                  },
+                                  dims=['presentation']
+                                  )
+
+    assembly.name = f'BMD_2024_{condition}'
+    return assembly
+
+
+if __name__ == '__main__':
+    data_path = Path('Data/BDM_2024_behavioral_data.csv')
+    for condition in CONDITIONS:
+        # every condition has 100 unique stimuli drawn from the 10 categories
+        num_stimuli = 100
+        num_categories = 10
+        if condition == 'texture_1':
+            num_dims = 5100
+            num_subjects = 51
+        elif condition == 'texture_2':
+            num_dims = 5200
+            num_subjects = 52
+        elif condition in ('dotted_1', 'dotted_2'):
+            num_dims = 5400
+            num_subjects = 54
+
+        assembly = collect_BMD2024_behavioral_assembly(data_path, condition)
+
+        # make sure assembly dims are correct length
+        assert len(assembly['presentation']) == num_dims
+        assert len(set(assembly['subject'].values)) == num_subjects
+        assert len(set(assembly['stimulus_id'].values)) == num_stimuli
+        assert len(set(assembly['truth'].values)) == num_categories
+
+        prints = package_data_assembly(catalog_identifier=None,
+                                       proto_data_assembly=assembly,
+                                       assembly_identifier=assembly.name,
+                                       stimulus_set_identifier=assembly.name,
+                                       assembly_class_name="BehavioralAssembly",
+                                       bucket_name="brainio-brainscore")
+
+        print(prints)
\ No newline at end of file
diff --git a/brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py b/brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py
new file mode 100644
index 000000000..129cb79d5
--- /dev/null
+++ b/brainscore_vision/data/bmd2024/data_packaging/BMD_2024_simulus_set.py
@@ -0,0 +1,48 @@
+import csv
+
+from brainio.stimuli import StimulusSet
+from brainio.packaging import package_stimulus_set
+
+CONDITIONS = ['texture_1', 'texture_2', 'dotted_1', 'dotted_2']
+
+
+def collect_BMD_2024_stimulus_set(condition, stimuli_directory, meta_filepath):
+    stimuli = []
+    stimulus_paths = {}
+
+    with 
open(meta_filepath, 'r') as metadata: + reader = csv.DictReader(metadata) + for row in reader: + stimulus_meta = { + 'stimulus_id' : row['stimulus_id'], + 'condition' : row['condition'], + 'truth' : row['truth'] + } + if row['condition'] == condition: + stimuli.append(stimulus_meta) + stimulus_paths[row['stimulus_id']] = f'{stimuli_directory}/{row["stimulus_id"]}.png' + + stimuli_assembly = StimulusSet(stimuli) + stimuli_assembly.stimulus_paths = stimulus_paths + stimuli_assembly.name = f'BMD_2024_{condition}' + stimuli_assembly.identifier = f'BMD_2024_{condition}' + + return stimuli_assembly + + +if __name__ == '__main__': + stimuli_directory = 'Stimuli_set' + meta_filepath = 'stim_meta.csv' + + for condition in CONDITIONS: + assembly = collect_BMD_2024_stimulus_set(condition, stimuli_directory, meta_filepath) + + assert len(assembly) == 100 + + prints = package_stimulus_set(catalog_name=None, + proto_stimulus_set=assembly, + stimulus_set_identifier=assembly.name, + bucket_name="brainio-brainscore") + print(prints) + + diff --git a/brainscore_vision/data/bmd2024/data_packaging/stim_meta.csv b/brainscore_vision/data/bmd2024/data_packaging/stim_meta.csv new file mode 100644 index 000000000..959936436 --- /dev/null +++ b/brainscore_vision/data/bmd2024/data_packaging/stim_meta.csv @@ -0,0 +1,401 @@ +stimulus_id,stimulus_paths,truth,condition +Airplane_11_d1,Stimuli_set/Airplane_11_d1.png,airplane,dotted_1 +Airplane_11_d2,Stimuli_set/Airplane_11_d2.png,airplane,dotted_2 +Airplane_11_t1,Stimuli_set/Airplane_11_t1.png,airplane,texture_1 +Airplane_11_t2,Stimuli_set/Airplane_11_t2.png,airplane,texture_2 +Airplane_12_d1,Stimuli_set/Airplane_12_d1.png,airplane,dotted_1 +Airplane_12_d2,Stimuli_set/Airplane_12_d2.png,airplane,dotted_2 +Airplane_12_t1,Stimuli_set/Airplane_12_t1.png,airplane,texture_1 +Airplane_12_t2,Stimuli_set/Airplane_12_t2.png,airplane,texture_2 +Airplane_13_d1,Stimuli_set/Airplane_13_d1.png,airplane,dotted_1 +Airplane_13_d2,Stimuli_set/Airplane_13_d2.png,airplane,dotted_2 +Airplane_13_t1,Stimuli_set/Airplane_13_t1.png,airplane,texture_1 +Airplane_13_t2,Stimuli_set/Airplane_13_t2.png,airplane,texture_2 +Airplane_14_d1,Stimuli_set/Airplane_14_d1.png,airplane,dotted_1 +Airplane_14_d2,Stimuli_set/Airplane_14_d2.png,airplane,dotted_2 +Airplane_14_t1,Stimuli_set/Airplane_14_t1.png,airplane,texture_1 +Airplane_14_t2,Stimuli_set/Airplane_14_t2.png,airplane,texture_2 +Airplane_15_d1,Stimuli_set/Airplane_15_d1.png,airplane,dotted_1 +Airplane_15_d2,Stimuli_set/Airplane_15_d2.png,airplane,dotted_2 +Airplane_15_t1,Stimuli_set/Airplane_15_t1.png,airplane,texture_1 +Airplane_15_t2,Stimuli_set/Airplane_15_t2.png,airplane,texture_2 +Airplane_1_d1,Stimuli_set/Airplane_1_d1.png,airplane,dotted_1 +Airplane_1_d2,Stimuli_set/Airplane_1_d2.png,airplane,dotted_2 +Airplane_1_t1,Stimuli_set/Airplane_1_t1.png,airplane,texture_1 +Airplane_1_t2,Stimuli_set/Airplane_1_t2.png,airplane,texture_2 +Airplane_2_d1,Stimuli_set/Airplane_2_d1.png,airplane,dotted_1 +Airplane_2_d2,Stimuli_set/Airplane_2_d2.png,airplane,dotted_2 +Airplane_2_t1,Stimuli_set/Airplane_2_t1.png,airplane,texture_1 +Airplane_2_t2,Stimuli_set/Airplane_2_t2.png,airplane,texture_2 +Airplane_3_d1,Stimuli_set/Airplane_3_d1.png,airplane,dotted_1 +Airplane_3_d2,Stimuli_set/Airplane_3_d2.png,airplane,dotted_2 +Airplane_3_t1,Stimuli_set/Airplane_3_t1.png,airplane,texture_1 +Airplane_3_t2,Stimuli_set/Airplane_3_t2.png,airplane,texture_2 +Airplane_8_d1,Stimuli_set/Airplane_8_d1.png,airplane,dotted_1 
+Airplane_8_d2,Stimuli_set/Airplane_8_d2.png,airplane,dotted_2 +Airplane_8_t1,Stimuli_set/Airplane_8_t1.png,airplane,texture_1 +Airplane_8_t2,Stimuli_set/Airplane_8_t2.png,airplane,texture_2 +Airplane_9_d1,Stimuli_set/Airplane_9_d1.png,airplane,dotted_1 +Airplane_9_d2,Stimuli_set/Airplane_9_d2.png,airplane,dotted_2 +Airplane_9_t1,Stimuli_set/Airplane_9_t1.png,airplane,texture_1 +Airplane_9_t2,Stimuli_set/Airplane_9_t2.png,airplane,texture_2 +Bear_10_d1,Stimuli_set/Bear_10_d1.png,bear,dotted_1 +Bear_10_d2,Stimuli_set/Bear_10_d2.png,bear,dotted_2 +Bear_10_t1,Stimuli_set/Bear_10_t1.png,bear,texture_1 +Bear_10_t2,Stimuli_set/Bear_10_t2.png,bear,texture_2 +Bear_11_d1,Stimuli_set/Bear_11_d1.png,bear,dotted_1 +Bear_11_d2,Stimuli_set/Bear_11_d2.png,bear,dotted_2 +Bear_11_t1,Stimuli_set/Bear_11_t1.png,bear,texture_1 +Bear_11_t2,Stimuli_set/Bear_11_t2.png,bear,texture_2 +Bear_12_d1,Stimuli_set/Bear_12_d1.png,bear,dotted_1 +Bear_12_d2,Stimuli_set/Bear_12_d2.png,bear,dotted_2 +Bear_12_t1,Stimuli_set/Bear_12_t1.png,bear,texture_1 +Bear_12_t2,Stimuli_set/Bear_12_t2.png,bear,texture_2 +Bear_13_d1,Stimuli_set/Bear_13_d1.png,bear,dotted_1 +Bear_13_d2,Stimuli_set/Bear_13_d2.png,bear,dotted_2 +Bear_13_t1,Stimuli_set/Bear_13_t1.png,bear,texture_1 +Bear_13_t2,Stimuli_set/Bear_13_t2.png,bear,texture_2 +Bear_15_d1,Stimuli_set/Bear_15_d1.png,bear,dotted_1 +Bear_15_d2,Stimuli_set/Bear_15_d2.png,bear,dotted_2 +Bear_15_t1,Stimuli_set/Bear_15_t1.png,bear,texture_1 +Bear_15_t2,Stimuli_set/Bear_15_t2.png,bear,texture_2 +Bear_1_d1,Stimuli_set/Bear_1_d1.png,bear,dotted_1 +Bear_1_d2,Stimuli_set/Bear_1_d2.png,bear,dotted_2 +Bear_1_t1,Stimuli_set/Bear_1_t1.png,bear,texture_1 +Bear_1_t2,Stimuli_set/Bear_1_t2.png,bear,texture_2 +Bear_2_d1,Stimuli_set/Bear_2_d1.png,bear,dotted_1 +Bear_2_d2,Stimuli_set/Bear_2_d2.png,bear,dotted_2 +Bear_2_t1,Stimuli_set/Bear_2_t1.png,bear,texture_1 +Bear_2_t2,Stimuli_set/Bear_2_t2.png,bear,texture_2 +Bear_3_d1,Stimuli_set/Bear_3_d1.png,bear,dotted_1 +Bear_3_d2,Stimuli_set/Bear_3_d2.png,bear,dotted_2 +Bear_3_t1,Stimuli_set/Bear_3_t1.png,bear,texture_1 +Bear_3_t2,Stimuli_set/Bear_3_t2.png,bear,texture_2 +Bear_7_d1,Stimuli_set/Bear_7_d1.png,bear,dotted_1 +Bear_7_d2,Stimuli_set/Bear_7_d2.png,bear,dotted_2 +Bear_7_t1,Stimuli_set/Bear_7_t1.png,bear,texture_1 +Bear_7_t2,Stimuli_set/Bear_7_t2.png,bear,texture_2 +Bear_8_d1,Stimuli_set/Bear_8_d1.png,bear,dotted_1 +Bear_8_d2,Stimuli_set/Bear_8_d2.png,bear,dotted_2 +Bear_8_t1,Stimuli_set/Bear_8_t1.png,bear,texture_1 +Bear_8_t2,Stimuli_set/Bear_8_t2.png,bear,texture_2 +Bicycle_10_d1,Stimuli_set/Bicycle_10_d1.png,bicycle,dotted_1 +Bicycle_10_d2,Stimuli_set/Bicycle_10_d2.png,bicycle,dotted_2 +Bicycle_10_t1,Stimuli_set/Bicycle_10_t1.png,bicycle,texture_1 +Bicycle_10_t2,Stimuli_set/Bicycle_10_t2.png,bicycle,texture_2 +Bicycle_11_d1,Stimuli_set/Bicycle_11_d1.png,bicycle,dotted_1 +Bicycle_11_d2,Stimuli_set/Bicycle_11_d2.png,bicycle,dotted_2 +Bicycle_11_t1,Stimuli_set/Bicycle_11_t1.png,bicycle,texture_1 +Bicycle_11_t2,Stimuli_set/Bicycle_11_t2.png,bicycle,texture_2 +Bicycle_12_d1,Stimuli_set/Bicycle_12_d1.png,bicycle,dotted_1 +Bicycle_12_d2,Stimuli_set/Bicycle_12_d2.png,bicycle,dotted_2 +Bicycle_12_t1,Stimuli_set/Bicycle_12_t1.png,bicycle,texture_1 +Bicycle_12_t2,Stimuli_set/Bicycle_12_t2.png,bicycle,texture_2 +Bicycle_13_d1,Stimuli_set/Bicycle_13_d1.png,bicycle,dotted_1 +Bicycle_13_d2,Stimuli_set/Bicycle_13_d2.png,bicycle,dotted_2 +Bicycle_13_t1,Stimuli_set/Bicycle_13_t1.png,bicycle,texture_1 +Bicycle_13_t2,Stimuli_set/Bicycle_13_t2.png,bicycle,texture_2 
+Bicycle_1_d1,Stimuli_set/Bicycle_1_d1.png,bicycle,dotted_1 +Bicycle_1_d2,Stimuli_set/Bicycle_1_d2.png,bicycle,dotted_2 +Bicycle_1_t1,Stimuli_set/Bicycle_1_t1.png,bicycle,texture_1 +Bicycle_1_t2,Stimuli_set/Bicycle_1_t2.png,bicycle,texture_2 +Bicycle_2_d1,Stimuli_set/Bicycle_2_d1.png,bicycle,dotted_1 +Bicycle_2_d2,Stimuli_set/Bicycle_2_d2.png,bicycle,dotted_2 +Bicycle_2_t1,Stimuli_set/Bicycle_2_t1.png,bicycle,texture_1 +Bicycle_2_t2,Stimuli_set/Bicycle_2_t2.png,bicycle,texture_2 +Bicycle_5_d1,Stimuli_set/Bicycle_5_d1.png,bicycle,dotted_1 +Bicycle_5_d2,Stimuli_set/Bicycle_5_d2.png,bicycle,dotted_2 +Bicycle_5_t1,Stimuli_set/Bicycle_5_t1.png,bicycle,texture_1 +Bicycle_5_t2,Stimuli_set/Bicycle_5_t2.png,bicycle,texture_2 +Bicycle_6_d1,Stimuli_set/Bicycle_6_d1.png,bicycle,dotted_1 +Bicycle_6_d2,Stimuli_set/Bicycle_6_d2.png,bicycle,dotted_2 +Bicycle_6_t1,Stimuli_set/Bicycle_6_t1.png,bicycle,texture_1 +Bicycle_6_t2,Stimuli_set/Bicycle_6_t2.png,bicycle,texture_2 +Bicycle_7_d1,Stimuli_set/Bicycle_7_d1.png,bicycle,dotted_1 +Bicycle_7_d2,Stimuli_set/Bicycle_7_d2.png,bicycle,dotted_2 +Bicycle_7_t1,Stimuli_set/Bicycle_7_t1.png,bicycle,texture_1 +Bicycle_7_t2,Stimuli_set/Bicycle_7_t2.png,bicycle,texture_2 +Bicycle_8_d1,Stimuli_set/Bicycle_8_d1.png,bicycle,dotted_1 +Bicycle_8_d2,Stimuli_set/Bicycle_8_d2.png,bicycle,dotted_2 +Bicycle_8_t1,Stimuli_set/Bicycle_8_t1.png,bicycle,texture_1 +Bicycle_8_t2,Stimuli_set/Bicycle_8_t2.png,bicycle,texture_2 +Bird_13_d1,Stimuli_set/Bird_13_d1.png,bird,dotted_1 +Bird_13_d2,Stimuli_set/Bird_13_d2.png,bird,dotted_2 +Bird_13_t1,Stimuli_set/Bird_13_t1.png,bird,texture_1 +Bird_13_t2,Stimuli_set/Bird_13_t2.png,bird,texture_2 +Bird_14_d1,Stimuli_set/Bird_14_d1.png,bird,dotted_1 +Bird_14_d2,Stimuli_set/Bird_14_d2.png,bird,dotted_2 +Bird_14_t1,Stimuli_set/Bird_14_t1.png,bird,texture_1 +Bird_14_t2,Stimuli_set/Bird_14_t2.png,bird,texture_2 +Bird_15_d1,Stimuli_set/Bird_15_d1.png,bird,dotted_1 +Bird_15_d2,Stimuli_set/Bird_15_d2.png,bird,dotted_2 +Bird_15_t1,Stimuli_set/Bird_15_t1.png,bird,texture_1 +Bird_15_t2,Stimuli_set/Bird_15_t2.png,bird,texture_2 +Bird_1_d1,Stimuli_set/Bird_1_d1.png,bird,dotted_1 +Bird_1_d2,Stimuli_set/Bird_1_d2.png,bird,dotted_2 +Bird_1_t1,Stimuli_set/Bird_1_t1.png,bird,texture_1 +Bird_1_t2,Stimuli_set/Bird_1_t2.png,bird,texture_2 +Bird_2_d1,Stimuli_set/Bird_2_d1.png,bird,dotted_1 +Bird_2_d2,Stimuli_set/Bird_2_d2.png,bird,dotted_2 +Bird_2_t1,Stimuli_set/Bird_2_t1.png,bird,texture_1 +Bird_2_t2,Stimuli_set/Bird_2_t2.png,bird,texture_2 +Bird_3_d1,Stimuli_set/Bird_3_d1.png,bird,dotted_1 +Bird_3_d2,Stimuli_set/Bird_3_d2.png,bird,dotted_2 +Bird_3_t1,Stimuli_set/Bird_3_t1.png,bird,texture_1 +Bird_3_t2,Stimuli_set/Bird_3_t2.png,bird,texture_2 +Bird_4_d1,Stimuli_set/Bird_4_d1.png,bird,dotted_1 +Bird_4_d2,Stimuli_set/Bird_4_d2.png,bird,dotted_2 +Bird_4_t1,Stimuli_set/Bird_4_t1.png,bird,texture_1 +Bird_4_t2,Stimuli_set/Bird_4_t2.png,bird,texture_2 +Bird_5_d1,Stimuli_set/Bird_5_d1.png,bird,dotted_1 +Bird_5_d2,Stimuli_set/Bird_5_d2.png,bird,dotted_2 +Bird_5_t1,Stimuli_set/Bird_5_t1.png,bird,texture_1 +Bird_5_t2,Stimuli_set/Bird_5_t2.png,bird,texture_2 +Bird_6_d1,Stimuli_set/Bird_6_d1.png,bird,dotted_1 +Bird_6_d2,Stimuli_set/Bird_6_d2.png,bird,dotted_2 +Bird_6_t1,Stimuli_set/Bird_6_t1.png,bird,texture_1 +Bird_6_t2,Stimuli_set/Bird_6_t2.png,bird,texture_2 +Bird_7_d1,Stimuli_set/Bird_7_d1.png,bird,dotted_1 +Bird_7_d2,Stimuli_set/Bird_7_d2.png,bird,dotted_2 +Bird_7_t1,Stimuli_set/Bird_7_t1.png,bird,texture_1 +Bird_7_t2,Stimuli_set/Bird_7_t2.png,bird,texture_2 
+Bottle_10_d1,Stimuli_set/Bottle_10_d1.png,bottle,dotted_1 +Bottle_10_d2,Stimuli_set/Bottle_10_d2.png,bottle,dotted_2 +Bottle_10_t1,Stimuli_set/Bottle_10_t1.png,bottle,texture_1 +Bottle_10_t2,Stimuli_set/Bottle_10_t2.png,bottle,texture_2 +Bottle_11_d1,Stimuli_set/Bottle_11_d1.png,bottle,dotted_1 +Bottle_11_d2,Stimuli_set/Bottle_11_d2.png,bottle,dotted_2 +Bottle_11_t1,Stimuli_set/Bottle_11_t1.png,bottle,texture_1 +Bottle_11_t2,Stimuli_set/Bottle_11_t2.png,bottle,texture_2 +Bottle_12_d1,Stimuli_set/Bottle_12_d1.png,bottle,dotted_1 +Bottle_12_d2,Stimuli_set/Bottle_12_d2.png,bottle,dotted_2 +Bottle_12_t1,Stimuli_set/Bottle_12_t1.png,bottle,texture_1 +Bottle_12_t2,Stimuli_set/Bottle_12_t2.png,bottle,texture_2 +Bottle_13_d1,Stimuli_set/Bottle_13_d1.png,bottle,dotted_1 +Bottle_13_d2,Stimuli_set/Bottle_13_d2.png,bottle,dotted_2 +Bottle_13_t1,Stimuli_set/Bottle_13_t1.png,bottle,texture_1 +Bottle_13_t2,Stimuli_set/Bottle_13_t2.png,bottle,texture_2 +Bottle_14_d1,Stimuli_set/Bottle_14_d1.png,bottle,dotted_1 +Bottle_14_d2,Stimuli_set/Bottle_14_d2.png,bottle,dotted_2 +Bottle_14_t1,Stimuli_set/Bottle_14_t1.png,bottle,texture_1 +Bottle_14_t2,Stimuli_set/Bottle_14_t2.png,bottle,texture_2 +Bottle_15_d1,Stimuli_set/Bottle_15_d1.png,bottle,dotted_1 +Bottle_15_d2,Stimuli_set/Bottle_15_d2.png,bottle,dotted_2 +Bottle_15_t1,Stimuli_set/Bottle_15_t1.png,bottle,texture_1 +Bottle_15_t2,Stimuli_set/Bottle_15_t2.png,bottle,texture_2 +Bottle_3_d1,Stimuli_set/Bottle_3_d1.png,bottle,dotted_1 +Bottle_3_d2,Stimuli_set/Bottle_3_d2.png,bottle,dotted_2 +Bottle_3_t1,Stimuli_set/Bottle_3_t1.png,bottle,texture_1 +Bottle_3_t2,Stimuli_set/Bottle_3_t2.png,bottle,texture_2 +Bottle_4_d1,Stimuli_set/Bottle_4_d1.png,bottle,dotted_1 +Bottle_4_d2,Stimuli_set/Bottle_4_d2.png,bottle,dotted_2 +Bottle_4_t1,Stimuli_set/Bottle_4_t1.png,bottle,texture_1 +Bottle_4_t2,Stimuli_set/Bottle_4_t2.png,bottle,texture_2 +Bottle_5_d1,Stimuli_set/Bottle_5_d1.png,bottle,dotted_1 +Bottle_5_d2,Stimuli_set/Bottle_5_d2.png,bottle,dotted_2 +Bottle_5_t1,Stimuli_set/Bottle_5_t1.png,bottle,texture_1 +Bottle_5_t2,Stimuli_set/Bottle_5_t2.png,bottle,texture_2 +Bottle_9_d1,Stimuli_set/Bottle_9_d1.png,bottle,dotted_1 +Bottle_9_d2,Stimuli_set/Bottle_9_d2.png,bottle,dotted_2 +Bottle_9_t1,Stimuli_set/Bottle_9_t1.png,bottle,texture_1 +Bottle_9_t2,Stimuli_set/Bottle_9_t2.png,bottle,texture_2 +Car_11_d1,Stimuli_set/Car_11_d1.png,car,dotted_1 +Car_11_d2,Stimuli_set/Car_11_d2.png,car,dotted_2 +Car_11_t1,Stimuli_set/Car_11_t1.png,car,texture_1 +Car_11_t2,Stimuli_set/Car_11_t2.png,car,texture_2 +Car_12_d1,Stimuli_set/Car_12_d1.png,car,dotted_1 +Car_12_d2,Stimuli_set/Car_12_d2.png,car,dotted_2 +Car_12_t1,Stimuli_set/Car_12_t1.png,car,texture_1 +Car_12_t2,Stimuli_set/Car_12_t2.png,car,texture_2 +Car_13_d1,Stimuli_set/Car_13_d1.png,car,dotted_1 +Car_13_d2,Stimuli_set/Car_13_d2.png,car,dotted_2 +Car_13_t1,Stimuli_set/Car_13_t1.png,car,texture_1 +Car_13_t2,Stimuli_set/Car_13_t2.png,car,texture_2 +Car_14_d1,Stimuli_set/Car_14_d1.png,car,dotted_1 +Car_14_d2,Stimuli_set/Car_14_d2.png,car,dotted_2 +Car_14_t1,Stimuli_set/Car_14_t1.png,car,texture_1 +Car_14_t2,Stimuli_set/Car_14_t2.png,car,texture_2 +Car_15_d1,Stimuli_set/Car_15_d1.png,car,dotted_1 +Car_15_d2,Stimuli_set/Car_15_d2.png,car,dotted_2 +Car_15_t1,Stimuli_set/Car_15_t1.png,car,texture_1 +Car_15_t2,Stimuli_set/Car_15_t2.png,car,texture_2 +Car_1_d1,Stimuli_set/Car_1_d1.png,car,dotted_1 +Car_1_d2,Stimuli_set/Car_1_d2.png,car,dotted_2 +Car_1_t1,Stimuli_set/Car_1_t1.png,car,texture_1 +Car_1_t2,Stimuli_set/Car_1_t2.png,car,texture_2 
+Car_3_d1,Stimuli_set/Car_3_d1.png,car,dotted_1 +Car_3_d2,Stimuli_set/Car_3_d2.png,car,dotted_2 +Car_3_t1,Stimuli_set/Car_3_t1.png,car,texture_1 +Car_3_t2,Stimuli_set/Car_3_t2.png,car,texture_2 +Car_4_d1,Stimuli_set/Car_4_d1.png,car,dotted_1 +Car_4_d2,Stimuli_set/Car_4_d2.png,car,dotted_2 +Car_4_t1,Stimuli_set/Car_4_t1.png,car,texture_1 +Car_4_t2,Stimuli_set/Car_4_t2.png,car,texture_2 +Car_5_d1,Stimuli_set/Car_5_d1.png,car,dotted_1 +Car_5_d2,Stimuli_set/Car_5_d2.png,car,dotted_2 +Car_5_t1,Stimuli_set/Car_5_t1.png,car,texture_1 +Car_5_t2,Stimuli_set/Car_5_t2.png,car,texture_2 +Car_7_d1,Stimuli_set/Car_7_d1.png,car,dotted_1 +Car_7_d2,Stimuli_set/Car_7_d2.png,car,dotted_2 +Car_7_t1,Stimuli_set/Car_7_t1.png,car,texture_1 +Car_7_t2,Stimuli_set/Car_7_t2.png,car,texture_2 +Cat_10_d1,Stimuli_set/Cat_10_d1.png,cat,dotted_1 +Cat_10_d2,Stimuli_set/Cat_10_d2.png,cat,dotted_2 +Cat_10_t1,Stimuli_set/Cat_10_t1.png,cat,texture_1 +Cat_10_t2,Stimuli_set/Cat_10_t2.png,cat,texture_2 +Cat_11_d1,Stimuli_set/Cat_11_d1.png,cat,dotted_1 +Cat_11_d2,Stimuli_set/Cat_11_d2.png,cat,dotted_2 +Cat_11_t1,Stimuli_set/Cat_11_t1.png,cat,texture_1 +Cat_11_t2,Stimuli_set/Cat_11_t2.png,cat,texture_2 +Cat_12_d1,Stimuli_set/Cat_12_d1.png,cat,dotted_1 +Cat_12_d2,Stimuli_set/Cat_12_d2.png,cat,dotted_2 +Cat_12_t1,Stimuli_set/Cat_12_t1.png,cat,texture_1 +Cat_12_t2,Stimuli_set/Cat_12_t2.png,cat,texture_2 +Cat_13_d1,Stimuli_set/Cat_13_d1.png,cat,dotted_1 +Cat_13_d2,Stimuli_set/Cat_13_d2.png,cat,dotted_2 +Cat_13_t1,Stimuli_set/Cat_13_t1.png,cat,texture_1 +Cat_13_t2,Stimuli_set/Cat_13_t2.png,cat,texture_2 +Cat_15_d1,Stimuli_set/Cat_15_d1.png,cat,dotted_1 +Cat_15_d2,Stimuli_set/Cat_15_d2.png,cat,dotted_2 +Cat_15_t1,Stimuli_set/Cat_15_t1.png,cat,texture_1 +Cat_15_t2,Stimuli_set/Cat_15_t2.png,cat,texture_2 +Cat_2_d1,Stimuli_set/Cat_2_d1.png,cat,dotted_1 +Cat_2_d2,Stimuli_set/Cat_2_d2.png,cat,dotted_2 +Cat_2_t1,Stimuli_set/Cat_2_t1.png,cat,texture_1 +Cat_2_t2,Stimuli_set/Cat_2_t2.png,cat,texture_2 +Cat_4_d1,Stimuli_set/Cat_4_d1.png,cat,dotted_1 +Cat_4_d2,Stimuli_set/Cat_4_d2.png,cat,dotted_2 +Cat_4_t1,Stimuli_set/Cat_4_t1.png,cat,texture_1 +Cat_4_t2,Stimuli_set/Cat_4_t2.png,cat,texture_2 +Cat_7_d1,Stimuli_set/Cat_7_d1.png,cat,dotted_1 +Cat_7_d2,Stimuli_set/Cat_7_d2.png,cat,dotted_2 +Cat_7_t1,Stimuli_set/Cat_7_t1.png,cat,texture_1 +Cat_7_t2,Stimuli_set/Cat_7_t2.png,cat,texture_2 +Cat_8_d1,Stimuli_set/Cat_8_d1.png,cat,dotted_1 +Cat_8_d2,Stimuli_set/Cat_8_d2.png,cat,dotted_2 +Cat_8_t1,Stimuli_set/Cat_8_t1.png,cat,texture_1 +Cat_8_t2,Stimuli_set/Cat_8_t2.png,cat,texture_2 +Cat_9_d1,Stimuli_set/Cat_9_d1.png,cat,dotted_1 +Cat_9_d2,Stimuli_set/Cat_9_d2.png,cat,dotted_2 +Cat_9_t1,Stimuli_set/Cat_9_t1.png,cat,texture_1 +Cat_9_t2,Stimuli_set/Cat_9_t2.png,cat,texture_2 +Chair_11_d1,Stimuli_set/Chair_11_d1.png,chair,dotted_1 +Chair_11_d2,Stimuli_set/Chair_11_d2.png,chair,dotted_2 +Chair_11_t1,Stimuli_set/Chair_11_t1.png,chair,texture_1 +Chair_11_t2,Stimuli_set/Chair_11_t2.png,chair,texture_2 +Chair_12_d1,Stimuli_set/Chair_12_d1.png,chair,dotted_1 +Chair_12_d2,Stimuli_set/Chair_12_d2.png,chair,dotted_2 +Chair_12_t1,Stimuli_set/Chair_12_t1.png,chair,texture_1 +Chair_12_t2,Stimuli_set/Chair_12_t2.png,chair,texture_2 +Chair_13_d1,Stimuli_set/Chair_13_d1.png,chair,dotted_1 +Chair_13_d2,Stimuli_set/Chair_13_d2.png,chair,dotted_2 +Chair_13_t1,Stimuli_set/Chair_13_t1.png,chair,texture_1 +Chair_13_t2,Stimuli_set/Chair_13_t2.png,chair,texture_2 +Chair_14_d1,Stimuli_set/Chair_14_d1.png,chair,dotted_1 +Chair_14_d2,Stimuli_set/Chair_14_d2.png,chair,dotted_2 
+Chair_14_t1,Stimuli_set/Chair_14_t1.png,chair,texture_1 +Chair_14_t2,Stimuli_set/Chair_14_t2.png,chair,texture_2 +Chair_1_d1,Stimuli_set/Chair_1_d1.png,chair,dotted_1 +Chair_1_d2,Stimuli_set/Chair_1_d2.png,chair,dotted_2 +Chair_1_t1,Stimuli_set/Chair_1_t1.png,chair,texture_1 +Chair_1_t2,Stimuli_set/Chair_1_t2.png,chair,texture_2 +Chair_2_d1,Stimuli_set/Chair_2_d1.png,chair,dotted_1 +Chair_2_d2,Stimuli_set/Chair_2_d2.png,chair,dotted_2 +Chair_2_t1,Stimuli_set/Chair_2_t1.png,chair,texture_1 +Chair_2_t2,Stimuli_set/Chair_2_t2.png,chair,texture_2 +Chair_3_d1,Stimuli_set/Chair_3_d1.png,chair,dotted_1 +Chair_3_d2,Stimuli_set/Chair_3_d2.png,chair,dotted_2 +Chair_3_t1,Stimuli_set/Chair_3_t1.png,chair,texture_1 +Chair_3_t2,Stimuli_set/Chair_3_t2.png,chair,texture_2 +Chair_4_d1,Stimuli_set/Chair_4_d1.png,chair,dotted_1 +Chair_4_d2,Stimuli_set/Chair_4_d2.png,chair,dotted_2 +Chair_4_t1,Stimuli_set/Chair_4_t1.png,chair,texture_1 +Chair_4_t2,Stimuli_set/Chair_4_t2.png,chair,texture_2 +Chair_6_d1,Stimuli_set/Chair_6_d1.png,chair,dotted_1 +Chair_6_d2,Stimuli_set/Chair_6_d2.png,chair,dotted_2 +Chair_6_t1,Stimuli_set/Chair_6_t1.png,chair,texture_1 +Chair_6_t2,Stimuli_set/Chair_6_t2.png,chair,texture_2 +Chair_8_d1,Stimuli_set/Chair_8_d1.png,chair,dotted_1 +Chair_8_d2,Stimuli_set/Chair_8_d2.png,chair,dotted_2 +Chair_8_t1,Stimuli_set/Chair_8_t1.png,chair,texture_1 +Chair_8_t2,Stimuli_set/Chair_8_t2.png,chair,texture_2 +Elephant_10_d1,Stimuli_set/Elephant_10_d1.png,elephant,dotted_1 +Elephant_10_d2,Stimuli_set/Elephant_10_d2.png,elephant,dotted_2 +Elephant_10_t1,Stimuli_set/Elephant_10_t1.png,elephant,texture_1 +Elephant_10_t2,Stimuli_set/Elephant_10_t2.png,elephant,texture_2 +Elephant_11_d1,Stimuli_set/Elephant_11_d1.png,elephant,dotted_1 +Elephant_11_d2,Stimuli_set/Elephant_11_d2.png,elephant,dotted_2 +Elephant_11_t1,Stimuli_set/Elephant_11_t1.png,elephant,texture_1 +Elephant_11_t2,Stimuli_set/Elephant_11_t2.png,elephant,texture_2 +Elephant_13_d1,Stimuli_set/Elephant_13_d1.png,elephant,dotted_1 +Elephant_13_d2,Stimuli_set/Elephant_13_d2.png,elephant,dotted_2 +Elephant_13_t1,Stimuli_set/Elephant_13_t1.png,elephant,texture_1 +Elephant_13_t2,Stimuli_set/Elephant_13_t2.png,elephant,texture_2 +Elephant_14_d1,Stimuli_set/Elephant_14_d1.png,elephant,dotted_1 +Elephant_14_d2,Stimuli_set/Elephant_14_d2.png,elephant,dotted_2 +Elephant_14_t1,Stimuli_set/Elephant_14_t1.png,elephant,texture_1 +Elephant_14_t2,Stimuli_set/Elephant_14_t2.png,elephant,texture_2 +Elephant_15_d1,Stimuli_set/Elephant_15_d1.png,elephant,dotted_1 +Elephant_15_d2,Stimuli_set/Elephant_15_d2.png,elephant,dotted_2 +Elephant_15_t1,Stimuli_set/Elephant_15_t1.png,elephant,texture_1 +Elephant_15_t2,Stimuli_set/Elephant_15_t2.png,elephant,texture_2 +Elephant_2_d1,Stimuli_set/Elephant_2_d1.png,elephant,dotted_1 +Elephant_2_d2,Stimuli_set/Elephant_2_d2.png,elephant,dotted_2 +Elephant_2_t1,Stimuli_set/Elephant_2_t1.png,elephant,texture_1 +Elephant_2_t2,Stimuli_set/Elephant_2_t2.png,elephant,texture_2 +Elephant_3_d1,Stimuli_set/Elephant_3_d1.png,elephant,dotted_1 +Elephant_3_d2,Stimuli_set/Elephant_3_d2.png,elephant,dotted_2 +Elephant_3_t1,Stimuli_set/Elephant_3_t1.png,elephant,texture_1 +Elephant_3_t2,Stimuli_set/Elephant_3_t2.png,elephant,texture_2 +Elephant_6_d1,Stimuli_set/Elephant_6_d1.png,elephant,dotted_1 +Elephant_6_d2,Stimuli_set/Elephant_6_d2.png,elephant,dotted_2 +Elephant_6_t1,Stimuli_set/Elephant_6_t1.png,elephant,texture_1 +Elephant_6_t2,Stimuli_set/Elephant_6_t2.png,elephant,texture_2 
+Elephant_7_d1,Stimuli_set/Elephant_7_d1.png,elephant,dotted_1 +Elephant_7_d2,Stimuli_set/Elephant_7_d2.png,elephant,dotted_2 +Elephant_7_t1,Stimuli_set/Elephant_7_t1.png,elephant,texture_1 +Elephant_7_t2,Stimuli_set/Elephant_7_t2.png,elephant,texture_2 +Elephant_9_d1,Stimuli_set/Elephant_9_d1.png,elephant,dotted_1 +Elephant_9_d2,Stimuli_set/Elephant_9_d2.png,elephant,dotted_2 +Elephant_9_t1,Stimuli_set/Elephant_9_t1.png,elephant,texture_1 +Elephant_9_t2,Stimuli_set/Elephant_9_t2.png,elephant,texture_2 +Knife_10_d1,Stimuli_set/Knife_10_d1.png,knife,dotted_1 +Knife_10_d2,Stimuli_set/Knife_10_d2.png,knife,dotted_2 +Knife_10_t1,Stimuli_set/Knife_10_t1.png,knife,texture_1 +Knife_10_t2,Stimuli_set/Knife_10_t2.png,knife,texture_2 +Knife_12_d1,Stimuli_set/Knife_12_d1.png,knife,dotted_1 +Knife_12_d2,Stimuli_set/Knife_12_d2.png,knife,dotted_2 +Knife_12_t1,Stimuli_set/Knife_12_t1.png,knife,texture_1 +Knife_12_t2,Stimuli_set/Knife_12_t2.png,knife,texture_2 +Knife_13_d1,Stimuli_set/Knife_13_d1.png,knife,dotted_1 +Knife_13_d2,Stimuli_set/Knife_13_d2.png,knife,dotted_2 +Knife_13_t1,Stimuli_set/Knife_13_t1.png,knife,texture_1 +Knife_13_t2,Stimuli_set/Knife_13_t2.png,knife,texture_2 +Knife_15_d1,Stimuli_set/Knife_15_d1.png,knife,dotted_1 +Knife_15_d2,Stimuli_set/Knife_15_d2.png,knife,dotted_2 +Knife_15_t1,Stimuli_set/Knife_15_t1.png,knife,texture_1 +Knife_15_t2,Stimuli_set/Knife_15_t2.png,knife,texture_2 +Knife_2_d1,Stimuli_set/Knife_2_d1.png,knife,dotted_1 +Knife_2_d2,Stimuli_set/Knife_2_d2.png,knife,dotted_2 +Knife_2_t1,Stimuli_set/Knife_2_t1.png,knife,texture_1 +Knife_2_t2,Stimuli_set/Knife_2_t2.png,knife,texture_2 +Knife_3_d1,Stimuli_set/Knife_3_d1.png,knife,dotted_1 +Knife_3_d2,Stimuli_set/Knife_3_d2.png,knife,dotted_2 +Knife_3_t1,Stimuli_set/Knife_3_t1.png,knife,texture_1 +Knife_3_t2,Stimuli_set/Knife_3_t2.png,knife,texture_2 +Knife_5_d1,Stimuli_set/Knife_5_d1.png,knife,dotted_1 +Knife_5_d2,Stimuli_set/Knife_5_d2.png,knife,dotted_2 +Knife_5_t1,Stimuli_set/Knife_5_t1.png,knife,texture_1 +Knife_5_t2,Stimuli_set/Knife_5_t2.png,knife,texture_2 +Knife_6_d1,Stimuli_set/Knife_6_d1.png,knife,dotted_1 +Knife_6_d2,Stimuli_set/Knife_6_d2.png,knife,dotted_2 +Knife_6_t1,Stimuli_set/Knife_6_t1.png,knife,texture_1 +Knife_6_t2,Stimuli_set/Knife_6_t2.png,knife,texture_2 +Knife_8_d1,Stimuli_set/Knife_8_d1.png,knife,dotted_1 +Knife_8_d2,Stimuli_set/Knife_8_d2.png,knife,dotted_2 +Knife_8_t1,Stimuli_set/Knife_8_t1.png,knife,texture_1 +Knife_8_t2,Stimuli_set/Knife_8_t2.png,knife,texture_2 +Knife_9_d1,Stimuli_set/Knife_9_d1.png,knife,dotted_1 +Knife_9_d2,Stimuli_set/Knife_9_d2.png,knife,dotted_2 +Knife_9_t1,Stimuli_set/Knife_9_t1.png,knife,texture_1 +Knife_9_t2,Stimuli_set/Knife_9_t2.png,knife,texture_2 diff --git a/brainscore_vision/data/bmd2024/test.py b/brainscore_vision/data/bmd2024/test.py new file mode 100644 index 000000000..1966ee30f --- /dev/null +++ b/brainscore_vision/data/bmd2024/test.py @@ -0,0 +1,130 @@ +import numpy as np +import pytest + +from brainscore_vision import load_stimulus_set, load_dataset + + +@pytest.mark.private_access +@pytest.mark.parametrize('assembly_identifier', [ + 'BMD2024.texture_1', + 'BMD2024.texture_2', + 'BMD2024.dotted_1', + 'BMD2024.dotted_2', +]) +def test_existence(assembly_identifier): + assert load_dataset(assembly_identifier) is not None + + +@pytest.mark.private_access +class TestAssemblies: + @pytest.mark.parametrize('identifier', [ + 'BMD2024.texture_1', + 'BMD2024.texture_2', + 'BMD2024.dotted_1', + 'BMD2024.dotted_2', + ]) + @pytest.mark.parametrize('field', [ + 
'stimulus_id', + 'condition', + 'truth' + ]) + def test_stimulus_set_assembly_alignment(self, identifier, field): + assembly = load_dataset(identifier) + assert assembly.stimulus_set is not None + s3_identifier = identifier.replace('BMD2024.', 'BMD_2024_') + assert assembly.stimulus_set.identifier == s3_identifier + assert set(assembly.stimulus_set[field]) == set(assembly[field].values) + + # test the number of subjects + @pytest.mark.parametrize('identifier, num_subjects', [ + ('BMD2024.texture_1', 51), + ('BMD2024.texture_2', 52), + ('BMD2024.dotted_1', 54), + ('BMD2024.dotted_2', 54), + ]) + def test_num_subjects(self, identifier, num_subjects): + assembly = load_dataset(identifier) + assert len(np.unique(assembly['subject'].values)) == num_subjects + + # test number of unique images + @pytest.mark.parametrize('identifier, num_unique_images', [ + ('BMD2024.texture_1', 100), + ('BMD2024.texture_2', 100), + ('BMD2024.dotted_1', 100), + ('BMD2024.dotted_2', 100), + ]) + def test_num_unique_images(self, identifier, num_unique_images): + assembly = load_dataset(identifier) + assert len(np.unique(assembly['stimulus_id'].values)) == num_unique_images + + # tests assembly dim for ALL datasets + @pytest.mark.parametrize('identifier, length', [ + ('BMD2024.texture_1', 5100), + ('BMD2024.texture_2', 5200), + ('BMD2024.dotted_1', 5400), + ('BMD2024.dotted_2', 5400), + ]) + def test_length(self, identifier, length): + assembly = load_dataset(identifier) + assert len(assembly['presentation']) == length + + # test assembly coords present in ALL 17 sets: + @pytest.mark.parametrize('identifier', [ + 'BMD2024.texture_1', + 'BMD2024.texture_2', + 'BMD2024.dotted_1', + 'BMD2024.dotted_2', + ]) + @pytest.mark.parametrize('field', [ + 'subject', + 'subject_answer', + 'condition', + 'stimulus_id', + 'truth' + ]) + def test_fields_present(self, identifier, field): + assembly = load_dataset(identifier) + assert hasattr(assembly, field) + + +@pytest.mark.private_access +@pytest.mark.slow +class TestStimulusSets: + # test stimulus_set data: + @pytest.mark.parametrize('identifier', [ + 'BMD2024.texture_1', + 'BMD2024.texture_2', + 'BMD2024.dotted_1', + 'BMD2024.dotted_2', + ]) + def test_stimulus_set_exists(self, identifier): + stimulus_set = load_stimulus_set(identifier) + assert stimulus_set is not None + s3_identifier = identifier.replace('BMD2024.', 'BMD_2024_') + assert stimulus_set.identifier == s3_identifier + + @pytest.mark.parametrize('identifier, num_images', [ + ('BMD2024.texture_1', 100), + ('BMD2024.texture_2', 100), + ('BMD2024.dotted_1', 100), + ('BMD2024.dotted_2', 100), + ]) + def test_number_of_images(self, identifier, num_images): + stimulus_set = load_stimulus_set(identifier) + assert len(np.unique(stimulus_set['stimulus_id'].values)) == num_images + + # test assembly coords present in ALL 17 sets: + @pytest.mark.parametrize('identifier', [ + 'BMD2024.texture_1', + 'BMD2024.texture_2', + 'BMD2024.dotted_1', + 'BMD2024.dotted_2', + ]) + @pytest.mark.parametrize('field', [ + 'stimulus_id', + 'truth', + 'condition' + ]) + def test_fields_present(self, identifier, field): + stimulus_set = load_stimulus_set(identifier) + assert hasattr(stimulus_set, field) From 749e287c35f2cb6f974afe35eca198ea7c91f936 Mon Sep 17 00:00:00 2001 From: David Coggan <06case_hoses@icloud.com> Date: Sat, 6 Jul 2024 04:26:29 -0500 Subject: [PATCH 43/68] submission of coggan2024_behavior data and benchmark (#1008) * Create __init__.py * Add files via upload * Create __init__.py * Add files via upload * Update and rename 
__init__.py to __init__.py * Rename data_packaging.py to data_packaging.py * Update and rename test.py to test.py * Update test.py * Update __init__.py * Update test.py --- .../coggan2024_behavior/__init__.py | 8 + .../coggan2024_behavior/benchmark.py | 129 ++++++++++++++ .../benchmarks/coggan2024_behavior/test.py | 21 +++ .../data/coggan2024_behavior/__init__.py | 36 ++++ .../coggan2024_behavior/data_packaging.py | 166 ++++++++++++++++++ .../data/coggan2024_behavior/test.py | 32 ++++ 6 files changed, 392 insertions(+) create mode 100644 brainscore_vision/benchmarks/coggan2024_behavior/__init__.py create mode 100644 brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py create mode 100644 brainscore_vision/benchmarks/coggan2024_behavior/test.py create mode 100644 brainscore_vision/data/coggan2024_behavior/__init__.py create mode 100644 brainscore_vision/data/coggan2024_behavior/data_packaging.py create mode 100644 brainscore_vision/data/coggan2024_behavior/test.py diff --git a/brainscore_vision/benchmarks/coggan2024_behavior/__init__.py b/brainscore_vision/benchmarks/coggan2024_behavior/__init__.py new file mode 100644 index 000000000..a6ca7c28c --- /dev/null +++ b/brainscore_vision/benchmarks/coggan2024_behavior/__init__.py @@ -0,0 +1,8 @@ +# Created by David Coggan on 2024 06 25 + +from brainscore_vision import benchmark_registry +from .benchmark import ( + Coggan2024_behavior_ConditionWiseAccuracySimilarity) + +benchmark_registry['Coggan2024_behavior-ConditionWiseAccuracySimilarity'] = ( + Coggan2024_behavior_ConditionWiseAccuracySimilarity) diff --git a/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py b/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py new file mode 100644 index 000000000..14db4121f --- /dev/null +++ b/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py @@ -0,0 +1,129 @@ +# Created by David Coggan on 2024 06 25 + +import numpy as np +from brainio.assemblies import DataAssembly, BehavioralAssembly +from brainscore_vision import load_stimulus_set, load_dataset +from brainscore_vision.benchmarks import BenchmarkBase +from brainscore_vision.benchmark_helpers.screen import place_on_screen +from brainscore_core.metrics import Score +from brainscore_vision.metric_helpers import Defaults as XarrayDefaults +from brainscore_vision.model_interface import BrainModel +from brainscore_vision.utils import LazyLoad +from scipy.stats import sem +import pandas as pd + +# the BIBTEX will be used to link to the publication from the benchmark for further details +BIBTEX = """@article { + Tong.Coggan2024.behavior, + author = {David D. Coggan and Frank Tong}, + title = {Modeling human visual recognition of occluded objects}}, + year = {2024}, + url = {}, + journal = {in prep}}""" + +class Coggan2024_behavior_ConditionWiseAccuracySimilarity(BenchmarkBase): + + """ + This benchmark measures classification accuracy for a set of occluded object images, then attains the mean accuracy + for each of the 18 occlusion conditions. This is then correlated with the corresponding accuracies for each of the + 30 human subjects in the behavioral experiment to obtain the brain score. + Note: Because the object-occluder pairings were randomized for each subject, image-level metrics (e.g., error + consistency) have limited utility here as a ceiling cannot be calculated. 
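+
+    Typical usage (a sketch; 'alexnet' is only an example model identifier, mirroring the test below):
+        from brainscore_vision import load_benchmark, load_model
+        benchmark = load_benchmark('Coggan2024_behavior-ConditionWiseAccuracySimilarity')
+        score = benchmark(load_model('alexnet'))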
+    """
+
+    def __init__(self):
+        self._fitting_stimuli = load_stimulus_set('Coggan2024_behavior_fitting')  # this fails if wrapped by LazyLoad
+        self._assembly = LazyLoad(lambda: load_dataset('Coggan2024_behavior'))
+        self._visual_degrees = 10
+        self._number_of_trials = 1
+        self._ceiling_func = lambda assembly: get_noise_ceiling(assembly)
+        super(Coggan2024_behavior_ConditionWiseAccuracySimilarity, self).__init__(
+            identifier='tong.Coggan2024_behavior-ConditionWiseAccuracySimilarity',
+            version=1,
+            ceiling_func=lambda df: get_noise_ceiling(df),
+            parent='behavior',
+            bibtex=BIBTEX,
+        )
+
+    def __call__(self, candidate: BrainModel) -> Score:
+
+        fitting_stimuli = place_on_screen(
+            self._fitting_stimuli,
+            target_visual_degrees=candidate.visual_degrees(),
+            source_visual_degrees=self._visual_degrees)
+        candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli)
+        stimulus_set = place_on_screen(
+            self._assembly.stimulus_set,
+            target_visual_degrees=candidate.visual_degrees(),
+            source_visual_degrees=self._visual_degrees)
+        probabilities = candidate.look_at(
+            stimulus_set, number_of_trials=self._number_of_trials)
+        model_predictions = [
+            probabilities.choice[c].values for c in probabilities.argmax(axis=1)]
+
+        data = pd.DataFrame(dict(
+            subject=self._assembly.subject,
+            object_class=self._assembly.object_class,
+            occluder_type=self._assembly.occluder_type,
+            occluder_color=self._assembly.occluder_color,
+            visibility=self._assembly.visibility,
+            human_prediction=self._assembly.values,
+            human_accuracy=self._assembly.human_accuracy,
+            model_prediction=model_predictions
+        ))
+        data['model_accuracy'] = pd.Series(
+            data.model_prediction == data.object_class, dtype=int)
+
+        # get correlation between model and human performance across conditions
+        performance = (data[data.visibility < 1]
+                       .groupby(['subject', 'occluder_type', 'occluder_color'])
+                       .mean(['human_accuracy', 'model_accuracy'])).reset_index()
+        scores = performance.groupby('subject').apply(
+            lambda df: np.corrcoef(df.human_accuracy, df.model_accuracy)[0, 1])
+        score = Score(np.mean(scores))
+        score.attrs['raw'] = scores
+
+        # get ceiled score
+        ceiled_score = ceiler(score, self._ceiling_func(performance))
+        ceiled_score.attrs['raw'] = score
+
+        return ceiled_score
+
+
+def get_noise_ceiling(performance: pd.DataFrame) -> Score:
+    """
+    Returns the noise ceiling for human similarity estimates. This is the lower bound of the typical noise-ceiling range
+    (e.g. Nili et al., 2014), i.e., the correlation of condition-wise accuracies between each individual subject and
+    the mean of the remaining subjects in the sample. This matches how the model is scored, if the group values are
+    substituted for model values.
+ """ + nc = [] + for subject in performance.subject.unique(): + performance_ind = performance[performance.subject == subject] + performance_grp = (performance[performance.subject != subject] + .groupby(['occluder_type', 'occluder_color']).mean()) + merged_df = performance_ind.merge( + performance_grp, on=['occluder_type', 'occluder_color']) + nc.append(np.corrcoef(merged_df.human_accuracy_x, merged_df.human_accuracy_y)[0, 1]) + ceiling = Score(np.mean(nc)) + ceiling.attrs['raw'] = nc + ceiling.attrs['error'] = sem(nc) + return ceiling + + +def ceiler(score: Score, ceiling: Score) -> Score: + # ro(X, Y) + # = (r(X, Y) / sqrt(r(X, X) * r(Y, Y)))^2 + # = (r(X, Y) / sqrt(r(Y, Y) * r(Y, Y)))^2 # assuming that r(Y, Y) ~ r(X, X) following Yamins 2014 + # = (r(X, Y) / r(Y, Y))^2 + r_square = np.power(score.values / ceiling.values, 2) + ceiled_score = Score(r_square) + if 'error' in score.attrs: + ceiled_score.attrs['error'] = score.attrs['error'] + ceiled_score.attrs[Score.RAW_VALUES_KEY] = score + ceiled_score.attrs['ceiling'] = ceiling + return ceiled_score + + + + diff --git a/brainscore_vision/benchmarks/coggan2024_behavior/test.py b/brainscore_vision/benchmarks/coggan2024_behavior/test.py new file mode 100644 index 000000000..218194684 --- /dev/null +++ b/brainscore_vision/benchmarks/coggan2024_behavior/test.py @@ -0,0 +1,21 @@ +# Created by David Coggan on 2024 06 26 + +import pytest +from pytest import approx +from brainscore_vision import benchmark_registry, load_benchmark +from brainscore_vision import load_model + + +def test_benchmark_registry(): + assert ('Coggan2024_behavior-ConditionWiseAccuracySimilarity' in + benchmark_registry) + +@pytest.mark.private_access +def test_benchmarks(): + benchmark = load_benchmark( + 'Coggan2024_behavior-ConditionWiseAccuracySimilarity') + model = load_model('alexnet') + result = benchmark(model) + assert result.values == approx(0.1318, abs=.001) + + diff --git a/brainscore_vision/data/coggan2024_behavior/__init__.py b/brainscore_vision/data/coggan2024_behavior/__init__.py new file mode 100644 index 000000000..8e05d6d69 --- /dev/null +++ b/brainscore_vision/data/coggan2024_behavior/__init__.py @@ -0,0 +1,36 @@ +# Created by David Coggan on 2024 06 23 + +from brainio.assemblies import BehavioralAssembly +from brainscore_vision import ( + stimulus_set_registry, data_registry, load_stimulus_set) +from brainscore_vision.data_helpers.s3 import ( + load_assembly_from_s3, load_stimulus_set_from_s3) + +# stimulus set +stimulus_set_registry['Coggan2024_behavior'] = lambda: load_stimulus_set_from_s3( + identifier="tong.Coggan2024_behavior", + bucket="brainio-brainscore", + csv_sha1="01c312c4c16f7acc5afddaafcf826e5af58b13e2", + zip_sha1="1c070b88fa45e9e69d58f95466cb6406a45a4873", + csv_version_id="oyBtiZLEnNnCOHGzH5e12uzO9FrQ.ja_", + zip_version_id="sgPkAllzqOgP6KZcuGenFdAecBmRYApo") + +# fitting stimuli +stimulus_set_registry['Coggan2024_behavior_fitting'] = lambda: ( + load_stimulus_set_from_s3( + identifier="tong.Coggan2024_behavior_fitting", + bucket="brainio-brainscore", + csv_sha1="136e48992305ea78a4fb77e9dfc75dcf01e885d0", + zip_sha1="24e68f5ba2f8f2105daf706307642637118e7d36", + csv_version_id="wq1GTpsTqB9ODddVSWpd4HMOllY7Q08_", + zip_version_id="IU.vvRmzBBhxErneMgQHoB1OKaeyen17")) + +# behavioral data +data_registry['Coggan2024_behavior'] = lambda: load_assembly_from_s3( + identifier="tong.Coggan2024_behavior", + version_id="Xuov.DxZnqSD1.Ia.ZySbSu8IW7cFigT", + sha1="c1ac4a268476c35bbe40081358667a03d3544631", + bucket="brainio-brainscore", + 
cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Coggan2024_behavior'), +) diff --git a/brainscore_vision/data/coggan2024_behavior/data_packaging.py b/brainscore_vision/data/coggan2024_behavior/data_packaging.py new file mode 100644 index 000000000..570336d88 --- /dev/null +++ b/brainscore_vision/data/coggan2024_behavior/data_packaging.py @@ -0,0 +1,166 @@ +# Created by David Coggan on 2024 06 23 + +# package stimuli +from brainio.stimuli import StimulusSet +from brainio.packaging import package_stimulus_set, package_data_assembly +from brainio.assemblies import BehavioralAssembly +import os +import os.path as op +import pandas as pd +import numpy as np +import torchvision.transforms as transforms +from PIL import Image + + +imagenet_metadata = dict( + bear=dict( + class_index_1K=294, + class_name='n02132136', + class_description='brown bear, bruin, Ursus arctos'), + bison=dict( + class_index_1K=347, + class_name='n02410509', + class_description='bison'), + elephant=dict( + class_index_1K=386, + class_name='n02504458', + class_description='African elephant, Loxodonta africana'), + hare=dict( + class_index_1K=331, + class_name='n02326432', + class_description='hare'), + jeep=dict( + class_index_1K=609, + class_name='n03594945', + class_description='jeep, landrover'), + lamp=dict( + class_index_1K=846, + class_name='n04380533', + class_description='table lamp'), + sportsCar=dict( + class_index_1K=817, + class_name='n04285008', + class_description='sports car, sport car'), + teapot=dict( + class_index_1K=849, + class_name='n04398044', + class_description='teapot') +) + + +""" +Stimuli used in human behavioral experiment. Each subject was shown a unique +set of 753 occluded images so the entire stimulus set is 22560 images. +""" +trials = pd.read_parquet('/home/tonglab/david/projects/p022_occlusion/in_vivo' + '/behavioral/exp1/analysis/trials.parquet') +trials.object_class.replace('car', 'sportsCar', inplace=True) +trials.prediction.replace('car', 'sportsCar', inplace=True) +stimuli = [] # collect meta +stimulus_paths = {} +exp_dir = ( + '/home/tonglab/david/projects/p022_occlusion/in_vivo/behavioral/exp1/data') +subjects = sorted(trials.subject.unique()) +for t, trial in trials.iterrows(): + subj = subjects.index(trial.subject) # 0-indexed + stim_path = exp_dir + trial.occluded_object_path.split('logFiles')[1] + stimulus_id = f'{t:05}_sub-{subj:02}_trial-{trial.trial-1:03}' + object_class, occluder_type, coverage, occluder_color, rep = ( + op.basename(stim_path).split('.png')[0].split('_')) + visibility = np.round(1 - float(coverage), 1) + object_data = imagenet_metadata[object_class] + stimulus_paths[stimulus_id] = stim_path + stimuli.append({ + 'stimulus_id': stimulus_id, + 'subject': f'sub-{subj:02}', + 'trial': trial.trial-1, + 'object_class': object_class, + 'imagenet_class_index_1K': object_data['class_index_1K'], + 'imagenet_class_name': object_data['class_name'], + 'imagenet_class_description': object_data['class_description'], + 'occluder_type': occluder_type, + 'occluder_color': occluder_color, + 'visibility': visibility, + 'repetition': int(rep)}) +stimuli = StimulusSet(stimuli) +stimuli.stimulus_paths = stimulus_paths +stimuli.name = "tong.Coggan2024_behavior" + +assert len(stimuli) == 22560 + +packaged_stimulus_metadata = package_stimulus_set( + catalog_name=None, + proto_stimulus_set=stimuli, + stimulus_set_identifier=stimuli.name, + bucket_name="brainio-brainscore") +print(packaged_stimulus_metadata) + + +""" +Stimuli used to fit the models. 
+These are an independent set of imagenet images from the same 8 classes as the +behavioral experiment. +""" +# get stimuli pngs +transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Resize(224), + transforms.CenterCrop(224), + transforms.Grayscale(num_output_channels=3), + transforms.ToPILImage()]) +svc_dataset = pd.read_csv('/home/tonglab/david/projects/p022_occlusion/' + 'in_silico/analysis/scripts/utils/SVC_images.csv') +svc_dataset['class'].replace('car', 'sportsCar', inplace=True) +image_dir = ('/home/tonglab/david/projects/p022_occlusion/in_silico/images' + '/behavior_svc') +#os.makedirs(image_dir, exist_ok=True) +#for image_path in svc_dataset.filepath.values: +# image = Image.open(image_path) +# image = transform(image) +# image.save(f'{image_dir}/{op.basename(image_path).split(".")[0]}.png') + +# package stimuli +stimuli = [] +stimulus_paths = {} +for i, row in svc_dataset.iterrows(): + stimulus_id = f'{i:04}_{op.basename(row.filepath).split(".")[0]}' + stimulus_paths[stimulus_id] = \ + f'{image_dir}/{op.basename(row.filepath).split(".")[0]}.png' + stimuli.append({ + 'stimulus_id': stimulus_id, + 'image_label': row['class'], + 'imagenet_path': row.filepath.split('ILSVRC2012/')[-1]}) +stimuli = StimulusSet(stimuli) +stimuli.stimulus_paths = stimulus_paths +stimuli.name = "tong.Coggan2024_behavior_fitting" +assert len(stimuli) == 2048 +packaged_stimulus_metadata = package_stimulus_set( + catalog_name=None, + proto_stimulus_set=stimuli, + stimulus_set_identifier=stimuli.name, + bucket_name="brainio-brainscore") +print(packaged_stimulus_metadata) + + +""" +Human behavioral responses to occluded images. +""" +# package data assembly +predictions = trials.prediction.to_list() +stimulus_ids = stimuli.stimulus_id.to_list() +assembly = BehavioralAssembly( + predictions, + dims=['presentation'], + coords={'stimulus_id': ('presentation', stimulus_ids), + 'human_accuracy': ('presentation', trials.accuracy)}) +assembly.name = "tong.Coggan2024_behavior" + +# upload to S3 +packaged_behavioral_metadata = package_data_assembly( + proto_data_assembly=assembly, + assembly_identifier=assembly.name, + stimulus_set_identifier=stimuli.name, + assembly_class_name="BehavioralAssembly", + bucket_name="brainio-brainscore", + catalog_identifier=None) +print(packaged_behavioral_metadata) diff --git a/brainscore_vision/data/coggan2024_behavior/test.py b/brainscore_vision/data/coggan2024_behavior/test.py new file mode 100644 index 000000000..527703c2e --- /dev/null +++ b/brainscore_vision/data/coggan2024_behavior/test.py @@ -0,0 +1,32 @@ +# Created by David Coggan on 2024 06 26 + +import pytest +import numpy as np +import brainscore_vision + +@pytest.mark.private_access +def test_Coggan2024_behavior_stimuli(): + stimulus_set = brainscore_vision.load_stimulus_set('Coggan2024_behavior') + assert len(stimulus_set) == 22560 + assert len(set(stimulus_set['object_class'])) == 8 + assert len(set(stimulus_set['occluder_type'])) == 10 + assert len(set(stimulus_set['occluder_color'])) == 3 + assert len(set(stimulus_set['visibility'])) == 6 + +@pytest.mark.private_access +def test_Coggan2024_behavior_stimuli_fitting(): + stimulus_set = brainscore_vision.load_stimulus_set( + 'Coggan2024_behavior_fitting') + assert len(stimulus_set) == 2048 + +@pytest.mark.private_access +def test_Coggan2024_behavior_dataset(): + assembly = brainscore_vision.load_dataset('Coggan2024_behavior') + np.testing.assert_array_equal( + assembly.dims, ['presentation']) + assert len(set(assembly['stimulus_id'].values)) == 22560 + 
assert assembly.shape[0] == 22560 + assert assembly.stimulus_set is not None + assert len(assembly.stimulus_set) == 22560 + + From 6c1786cd7ffedf2042a72680955058a7c3de3a5c Mon Sep 17 00:00:00 2001 From: David Coggan <06case_hoses@icloud.com> Date: Sat, 6 Jul 2024 04:27:37 -0500 Subject: [PATCH 44/68] submission of coggan2024_fMRI benchmark (#970) * coggan2024_fMRI benchmark * coggan2024_fMRI benchmark * Update brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/benchmarks/coggan2024_fMRI/test.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/benchmarks/coggan2024_fMRI/benchmark.py Co-authored-by: Martin Schrimpf * Benchmark naming convention adherence * Test expressiveness improved * Expected score precision reduced --------- Co-authored-by: ddcoggan Co-authored-by: Martin Schrimpf --- .../benchmarks/coggan2024_fMRI/__init__.py | 15 ++ .../benchmarks/coggan2024_fMRI/benchmark.py | 201 ++++++++++++++++++ .../benchmarks/coggan2024_fMRI/test.py | 25 +++ .../data/coggan2024_fMRI/__init__.py | 27 +++ .../data/coggan2024_fMRI/data_packaging.py | 123 +++++++++++ .../data/coggan2024_fMRI/test.py | 25 +++ 6 files changed, 416 insertions(+) create mode 100644 brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py create mode 100644 brainscore_vision/benchmarks/coggan2024_fMRI/benchmark.py create mode 100644 brainscore_vision/benchmarks/coggan2024_fMRI/test.py create mode 100644 brainscore_vision/data/coggan2024_fMRI/__init__.py create mode 100644 brainscore_vision/data/coggan2024_fMRI/data_packaging.py create mode 100644 brainscore_vision/data/coggan2024_fMRI/test.py diff --git a/brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py b/brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py new file mode 100644 index 000000000..20ece8608 --- /dev/null +++ b/brainscore_vision/benchmarks/coggan2024_fMRI/__init__.py @@ -0,0 +1,15 @@ +# Created by David Coggan on 2024 06 25 + +from brainscore_vision import benchmark_registry +from .benchmark import ( + Coggan2024_V1, + Coggan2024_V2, + Coggan2024_V4, + Coggan2024_IT, +) + +benchmark_registry['Coggan2024_fMRI.V1-rdm'] = Coggan2024_V1 +benchmark_registry['Coggan2024_fMRI.V2-rdm'] = Coggan2024_V2 +benchmark_registry['Coggan2024_fMRI.V4-rdm'] = Coggan2024_V4 +benchmark_registry['Coggan2024_fMRI.IT-rdm'] = Coggan2024_IT + diff --git a/brainscore_vision/benchmarks/coggan2024_fMRI/benchmark.py b/brainscore_vision/benchmarks/coggan2024_fMRI/benchmark.py new file mode 100644 index 000000000..153e2bfe3 --- /dev/null +++ b/brainscore_vision/benchmarks/coggan2024_fMRI/benchmark.py @@ -0,0 +1,201 @@ +# Created by David Coggan on 2024 06 25 + +import numpy as np +from brainio.assemblies import DataAssembly, NeuroidAssembly +from brainscore_vision import load_dataset +from brainscore_vision.benchmarks import BenchmarkBase +from brainscore_vision.benchmark_helpers.screen import place_on_screen +from brainscore_core.metrics import Score +from brainscore_vision.metric_helpers import Defaults as XarrayDefaults +from brainscore_vision.model_interface import BrainModel + + +# the BIBTEX will be used to link to the publication from the benchmark for further details +BIBTEX = """@article { + Tong.Coggan2024.fMRI, + author = {David D. 
Coggan and Frank Tong}, + title = {Evidence of strong amodal completion in both early and + high-level visual cortices}, + year = {2024}, + url = {}, + journal = {under review}}""" + + +class Coggan2024_fMRI_Benchmark(BenchmarkBase): + def __init__(self, identifier, assembly, ceiling_func, visual_degrees, + **kwargs): + super(Coggan2024_fMRI_Benchmark, self).__init__( + identifier=identifier, + ceiling_func=ceiling_func, **kwargs) + self._assembly = assembly + self._visual_degrees = visual_degrees + self._ceiling_func = ceiling_func + region = np.unique(self._assembly['region']) + assert len(region) == 1 + self.region = region[0] + + def __call__(self, candidate: BrainModel): + + # get stimuli + stimulus_set = place_on_screen( + self._assembly.stimulus_set, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees) + + # get model activations + candidate.start_recording(self.region, time_bins=[(0, 250)]) + source_assembly = candidate.look_at(stimulus_set, number_of_trials=1) + if ('time_bin' in source_assembly.dims and + source_assembly.sizes['time_bin'] == 1): + source_assembly = source_assembly.squeeze('time_bin') + + # obtain the RSM + source_rsm = RSA(source_assembly, XarrayDefaults.neuroid_dim) + + # compare the model and human RSMs to get a score + raw_score = get_score(source_rsm, self._assembly) + + # obtain the score ceiling + ceiling = self._ceiling_func(self._assembly) + + # obtain the ceiled score + ceiled_score = ceiler(raw_score, ceiling) + + return ceiled_score + + +def get_ceiling(assembly: NeuroidAssembly) -> Score: + + """ + Returns the noise ceiling for the roi of the assembly. + This is the lower bound of typical noise-ceiling range + (e.g. Nili et al., 2014), i.e., the correlation of each individual + subject's RSM with the mean RSM across the remaining subjects in the sample. + This matches how the model is scored, if the group RSM is substituted for + model RSM. + """ + + off_diag_indices = np.array(1 - np.eye(24).flatten(), dtype=bool) + assert len(set(assembly.region.values)) == 1 + nc = [] + n_subs = len(assembly['subject']) + for s in range(n_subs): + # get individual and group RSMs, flatten, remove on-diagonal vals + RSM_ind = assembly.values[:, :, s].flatten()[off_diag_indices] + RSM_grp = assembly.values[:, :, [i for i in range(n_subs) if i != s] + ].mean(axis=2).flatten()[off_diag_indices] + nc.append(np.corrcoef(RSM_ind, RSM_grp)[0, 1]) + noise_ceiling = Score(np.mean(nc)) + noise_ceiling.attrs['raw'] = nc + return noise_ceiling + + +def get_score(source_rsm: NeuroidAssembly, target_rsm: NeuroidAssembly) -> Score: + + """ + Computes the pearson correlation between the model RSM and each subject's + RSM, the average of which is returned as the score. Individual scores are + stored in the Score's attributes. 
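+    Only the off-diagonal cells of the 24 x 24 RSMs enter the correlation,
+    mirroring how the noise ceiling is computed above.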
+ """ + + off_diag_indices = np.array(1 - np.eye(24).flatten(), dtype=bool) + model_rsm = source_rsm.values.flatten()[off_diag_indices] + scores = [] + n_subs = len(target_rsm['subject']) + for s in range(n_subs): + human_rsm = target_rsm.values[:, :, s].flatten()[off_diag_indices] + scores.append(np.corrcoef(human_rsm, model_rsm)[0, 1]) + score = Score(np.mean(scores)) + score.attrs[Score.RAW_VALUES_KEY] = scores + return score + + +def ceiler(score: Score, ceiling: Score) -> Score: + # ro(X, Y) + # = (r(X, Y) / sqrt(r(X, X) * r(Y, Y)))^2 + # = (r(X, Y) / sqrt(r(Y, Y) * r(Y, Y)))^2 # assuming that r(Y, Y) ~ r(X, X) following Yamins 2014 + # = (r(X, Y) / r(Y, Y))^2 + r_square = np.power(score.values / ceiling.values, 2) + ceiled_score = Score(r_square) + if 'error' in score.attrs: + ceiled_score.attrs['error'] = score.attrs['error'] + ceiled_score.attrs[Score.RAW_VALUES_KEY] = score + ceiled_score.attrs['ceiling'] = ceiling + return ceiled_score + + +def RSA(assembly: NeuroidAssembly, neuroid_dim: str) -> DataAssembly: + + """ + Performs analogous unit selection and normalization as the fMRI analysis, + then calculates RSMs. + """ + + # get data orientation + assert neuroid_dim in assembly.dims, \ + f'neuroid_dim {neuroid_dim} not in assembly dims {assembly.dims}' + if assembly.dims.index(neuroid_dim) == 0: + assembly = assembly.transpose('presentation', 'neuroid') + + patterns = assembly.values + n_conds, n_chan = patterns.shape + assert n_conds == 24, f'Expected 24 conditions, got {n_conds}' + + # remove units with no variance across conditions + patterns_std = patterns.std(axis=0) + patterns = patterns[:, patterns_std != 0] + + # select units with the highest mean response magnitude across conditions + patterns_mean = patterns.mean(axis=0) + std_x_units = patterns_mean.std() # std of mean unit-wise response + selected_units = np.abs(patterns_mean) > (std_x_units * 3.1) + assert selected_units.any(), \ + 'No units with mean response > 3.1 * std over all units' + patterns = patterns[:, selected_units] + + # convert to z-score + patterns_mean = np.tile(patterns.mean(0), (n_conds, 1)) + patterns_std = np.tile(patterns.std(0), (n_conds, 1)) + patterns = (patterns - patterns_mean) / patterns_std + + # perform pairwise correlation + correlations = np.corrcoef(patterns) + coords = {coord: coord_value for coord, coord_value in + assembly.coords.items() if coord != neuroid_dim} + dims = [dim if dim != neuroid_dim else assembly.dims[ + (i - 1) % len(assembly.dims)] + for i, dim in enumerate(assembly.dims)] + similarities = DataAssembly(correlations, coords=coords, dims=dims) + return similarities + + +def _Coggan2024_Region(region: str): + assembly = load_dataset('Coggan2024_fMRI') + assembly = assembly.sel(region=region) + assembly['region'] = ('subject', [region] * len(assembly['subject'])) + benchmark = Coggan2024_fMRI_Benchmark( + identifier=f'tong.Coggan2024_fMRI.{region}-rdm', + version=1, + assembly=assembly, + visual_degrees=9, + ceiling_func=get_ceiling, + parent=region, + bibtex=BIBTEX) + return benchmark + + +def Coggan2024_V1(): + return _Coggan2024_Region(region='V1') + + +def Coggan2024_V2(): + return _Coggan2024_Region(region='V2') + + +def Coggan2024_V4(): + return _Coggan2024_Region(region='V4') + + +def Coggan2024_IT(): + return _Coggan2024_Region(region='IT') + diff --git a/brainscore_vision/benchmarks/coggan2024_fMRI/test.py b/brainscore_vision/benchmarks/coggan2024_fMRI/test.py new file mode 100644 index 000000000..6847cee5f --- /dev/null +++ 
b/brainscore_vision/benchmarks/coggan2024_fMRI/test.py @@ -0,0 +1,25 @@ +# Created by David Coggan on 2024 06 26 + +import pytest +from pytest import approx +from brainscore_vision import benchmark_registry, load_benchmark +from brainscore_vision import load_model + + +@pytest.mark.parametrize('region', ['V1', 'V2', 'V4', 'IT']) +def test_benchmark_registry(region): + assert f'Coggan2024_fMRI.{region}-rdm' in benchmark_registry + + +@pytest.mark.parametrize('region', ['V1', 'V2', 'V4', 'IT']) +def test_benchmarks(region): + expected_score = dict( + V1=0.0182585, + V2=0.3352083, + V4=0.3008136, + IT=0.4486508)[region] + model = load_model('alexnet') + benchmark = load_benchmark(f'Coggan2024_fMRI.{region}-rdm') + score = benchmark(model) + assert score.values == approx(expected_score, abs=.005) + diff --git a/brainscore_vision/data/coggan2024_fMRI/__init__.py b/brainscore_vision/data/coggan2024_fMRI/__init__.py new file mode 100644 index 000000000..ab94ab1cd --- /dev/null +++ b/brainscore_vision/data/coggan2024_fMRI/__init__.py @@ -0,0 +1,27 @@ +# Created by David Coggan on 2024 06 23 + +from brainio.assemblies import NeuroidAssembly +from brainscore_vision import ( + stimulus_set_registry, data_registry, load_stimulus_set) +from brainscore_vision.data_helpers.s3 import ( + load_assembly_from_s3, load_stimulus_set_from_s3) + +# stimulus set +stimulus_set_registry['Coggan2024_fMRI'] = lambda: load_stimulus_set_from_s3( + identifier="tong.Coggan2024_fMRI", + bucket="brainio-brainscore", + csv_sha1="0089f5f8fd3f2de14de12ed736a0f88575f8e1ee", + zip_sha1="e26fdea4d866799526dea183f5bfb9792718822a", + csv_version_id="q0kxLCC8m6LSaLEQzB26sfyoKgJJwkAs", + zip_version_id="SZwyN4ZgEmpW22YlcbqIbL9c0QudlMAp") + +# fMRI data +data_registry['Coggan2024_fMRI'] = lambda: load_assembly_from_s3( + identifier="tong.Coggan2024_fMRI", + version_id="6P898Mio3VBsFx_qbC9rJECMELNGRprH", + sha1="da3adbca5247d0491d366f94e8431fb3e4e58db2", + bucket="brainio-brainscore", + cls=NeuroidAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Coggan2024_fMRI'), +) + diff --git a/brainscore_vision/data/coggan2024_fMRI/data_packaging.py b/brainscore_vision/data/coggan2024_fMRI/data_packaging.py new file mode 100644 index 000000000..3d3a2f89c --- /dev/null +++ b/brainscore_vision/data/coggan2024_fMRI/data_packaging.py @@ -0,0 +1,123 @@ +# Created by David Coggan on 2024 06 23 + +from brainio.stimuli import StimulusSet +from brainio.packaging import package_stimulus_set +import glob +import os.path as op +from brainio.assemblies import DataAssembly, NeuroidAssembly +from brainio.packaging import package_data_assembly +import pickle as pkl +from itertools import product as itp +import numpy as np + + +# imagenet metadata +imagenet_metadata = dict( + bear=dict( + class_index_1K=294, + class_name='n02132136', + class_description='brown bear, bruin, Ursus arctos', + path='val/n02132136/ILSVRC2012_val_00049345.jpg'), + bison=dict( + class_index_1K=347, + class_name='n02410509', + class_description='bison', + path='val/n02410509/ILSVRC2012_val_00048511.jpg'), + elephant=dict( + class_index_1K=386, + class_name='n02504458', + class_description='African elephant, Loxodonta africana', + path='val/n02504458/ILSVRC2012_val_00030840.jpg'), + hare=dict( + class_index_1K=331, + class_name='n02326432', + class_description='hare', + path='val/n02326432/ILSVRC2012_val_00004064.jpg'), + jeep=dict( + class_index_1K=609, + class_name='n03594945', + class_description='jeep, landrover', + path='val/n03594945/ILSVRC2012_val_00036304.jpg'), + 
lamp=dict( + class_index_1K=846, + class_name='n04380533', + class_description='table lamp', + path='val/n04380533/ILSVRC2012_val_00001055.jpg'), + sportsCar=dict( + class_index_1K=817, + class_name='n04285008', + class_description='sports car, sport car', + path='val/n04285008/ILSVRC2012_val_00001247.jpg'), + teapot=dict( + class_index_1K=849, + class_name='n04398044', + class_description='teapot', + path='val/n04398044/ILSVRC2012_val_00033663.jpg') +) + +# stimuli +stimuli = [] # collect meta +stimulus_paths = {} # collect mapping of stimulus_id to filepath +for f, filepath in enumerate(sorted(glob.glob('stimuli/*.png'))): + stimulus_id = op.basename(filepath).split('.')[0] + object_name, occlusion_condition = stimulus_id.split('_') + occlusion_condition = occlusion_condition.split('.')[0] + stimulus_paths[stimulus_id] = filepath + object_data = imagenet_metadata[object_name] + stimuli.append({ + 'stimulus_id': stimulus_id, + 'object_name': object_name, + 'occlusion_condition': occlusion_condition, + 'imagenet_class_index_1K': object_data['class_index_1K'], + 'imagenet_class_name': object_data['class_name'], + 'imagenet_class_description': object_data['class_description'], + 'imagenet_path': object_data['path'], + }) +stimuli = StimulusSet(stimuli) +stimuli.stimulus_paths = stimulus_paths +stimuli.name = 'coggan2024_fMRI' +""" +packaged_stimulus_metadata = package_stimulus_set( + catalog_name=None, + proto_stimulus_set=stimuli, + stimulus_set_identifier=stimuli.name, + bucket_name="brainio-brainscore") +pkl.dump(packaged_stimulus_metadata, open('packaged_stimulus_metadata.pkl', 'wb')) +print(packaged_stimulus_metadata) +""" + + +# fMRI data +dataset = pkl.load(open('dataset.pkl', 'rb')) +regions = ['V1', 'V2', 'V4', 'IT'] +subjects = list(dataset['V1'].keys()) +n_subs = len(subjects) +off_diag_indices = np.array(1 - np.eye(24).flatten(), dtype=bool) +data_all, splits_all, sides_all, subjects_all, regions_all = [], [], [], [], [] +for region, subject in itp(regions, subjects): + data_all.append(dataset[region][subject]) + subjects_all.append(subject) + regions_all.append(region) +data_all = np.stack(data_all, axis=2) +assembly = NeuroidAssembly( + data_all, dims=['presentation', 'presentation', 'neuroid'], + coords={ + 'stimulus_id': ('presentation', stimuli.stimulus_id), + 'object_name': ('presentation', stimuli.object_name), + 'occlusion_condition': ('presentation', stimuli.occlusion_condition), + 'subject': ('neuroid', subjects_all), + 'region': ('neuroid', regions_all), +}) +assembly.name = 'coggan2024_fMRI' + +packaged_neural_metadata = package_data_assembly( + proto_data_assembly=assembly, + assembly_identifier=assembly.name, + stimulus_set_identifier=stimuli.name, + assembly_class_name="NeuroidAssembly", + bucket_name="brainio-brainscore", + catalog_identifier=None) + +# save the packaged metadata +pkl.dump(packaged_neural_metadata, open('packaged_neural_metadata.pkl', 'wb')) +print(packaged_neural_metadata) \ No newline at end of file diff --git a/brainscore_vision/data/coggan2024_fMRI/test.py b/brainscore_vision/data/coggan2024_fMRI/test.py new file mode 100644 index 000000000..6915ea505 --- /dev/null +++ b/brainscore_vision/data/coggan2024_fMRI/test.py @@ -0,0 +1,25 @@ +# Created by David Coggan on 2024 06 26 + +import brainscore_vision +import numpy as np + + +def test_Coggan2024_fMRI_stimuli(): + stimulus_set = brainscore_vision.load_stimulus_set('Coggan2024_fMRI') + assert len(stimulus_set) == 24 + assert len(set(stimulus_set['object_name'])) == 8 + assert 
len(set(stimulus_set['occlusion_condition'])) == 3 + + +def test_Coggan2024_fMRI_dataset(): + assembly = brainscore_vision.load_dataset('Coggan2024_fMRI') + np.testing.assert_array_equal( + assembly.dims, ['presentation', 'presentation', 'neuroid']) + assert len(set(assembly['stimulus_id'].values)) == 24 + assert assembly.shape[0] == 24 + assert assembly.shape[1] == 24 + assert assembly.shape[2] == 36 + assert assembly.stimulus_set is not None + assert len(assembly.stimulus_set) == 24 + + From b1bc672e075cfbef30ac94730e4a57e56f83bd8f Mon Sep 17 00:00:00 2001 From: Michael Ferguson Date: Sat, 6 Jul 2024 08:48:16 -0400 Subject: [PATCH 45/68] Add maniquet2024 benchmark (competition) (#940) * add maniquet2024 benchmark material * use relative import for init file * removed TaskConsistency from test.py * add Maniquet2024TasksConsistency back into registry * add metric and data requirements.txt * git comments * git comments, round 2 * benchmark now produces scores * fix registry test * fix identifier in test --------- Co-authored-by: Martin Schrimpf --- .../benchmarks/maniquet2024/__init__.py | 6 + .../benchmarks/maniquet2024/benchmark.py | 192 ++++++++ .../benchmarks/maniquet2024/requirements.txt | 3 + .../benchmarks/maniquet2024/test.py | 17 + .../data/maniquet2024/__init__.py | 59 +++ .../data/maniquet2024/data_packaging.py | 151 ++++++ brainscore_vision/data/maniquet2024/test.py | 16 + .../metrics/maniquet2024_metrics/__init__.py | 19 + .../metrics/maniquet2024_metrics/metric.py | 443 ++++++++++++++++++ .../maniquet2024_metrics/requirements.txt | 3 + .../metrics/maniquet2024_metrics/test.py | 8 + 11 files changed, 917 insertions(+) create mode 100644 brainscore_vision/benchmarks/maniquet2024/__init__.py create mode 100644 brainscore_vision/benchmarks/maniquet2024/benchmark.py create mode 100644 brainscore_vision/benchmarks/maniquet2024/requirements.txt create mode 100644 brainscore_vision/benchmarks/maniquet2024/test.py create mode 100644 brainscore_vision/data/maniquet2024/__init__.py create mode 100644 brainscore_vision/data/maniquet2024/data_packaging.py create mode 100644 brainscore_vision/data/maniquet2024/test.py create mode 100644 brainscore_vision/metrics/maniquet2024_metrics/__init__.py create mode 100644 brainscore_vision/metrics/maniquet2024_metrics/metric.py create mode 100644 brainscore_vision/metrics/maniquet2024_metrics/requirements.txt create mode 100644 brainscore_vision/metrics/maniquet2024_metrics/test.py diff --git a/brainscore_vision/benchmarks/maniquet2024/__init__.py b/brainscore_vision/benchmarks/maniquet2024/__init__.py new file mode 100644 index 000000000..acc15b632 --- /dev/null +++ b/brainscore_vision/benchmarks/maniquet2024/__init__.py @@ -0,0 +1,6 @@ +from brainscore_vision import benchmark_registry +from .benchmark import Maniquet2024ConfusionSimilarity, Maniquet2024TasksConsistency + +benchmark_registry['Maniquet2024-confusion_similarity'] = lambda: Maniquet2024ConfusionSimilarity() +benchmark_registry['Maniquet2024-tasks_consistency'] = lambda: Maniquet2024TasksConsistency() + diff --git a/brainscore_vision/benchmarks/maniquet2024/benchmark.py b/brainscore_vision/benchmarks/maniquet2024/benchmark.py new file mode 100644 index 000000000..762087e5b --- /dev/null +++ b/brainscore_vision/benchmarks/maniquet2024/benchmark.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Jun 21 23:15:17 2024 + +@author: costantino_ai +""" +from brainscore_vision.benchmarks import BenchmarkBase +from 
brainscore_vision.benchmark_helpers.screen import place_on_screen +from brainscore_vision.model_interface import BrainModel +from brainscore_vision import load_stimulus_set, load_metric, load_dataset +from brainscore_vision.utils import LazyLoad + + +BIBTEX = """@article {Maniquet2024.04.02.587669, + author = {Maniquet, Tim and de Beeck, Hans Op and Costantino, Andrea Ivan}, + title = {Recurrent issues with deep neural network models of visual recognition}, + elocation-id = {2024.04.02.587669}, + year = {2024}, + doi = {10.1101/2024.04.02.587669}, + publisher = {Cold Spring Harbor Laboratory}, + URL = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669}, + eprint = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669.full.pdf}, + journal = {bioRxiv} +}""" + + +class _Maniquet2024ConfusionSimilarity(BenchmarkBase): + """ + A benchmark class to measure the similarity between model-generated confusion probabilities + and human confusion data in visual tasks, specifically designed for the Maniquet2024 dataset. + + Attributes: + _metric (ConfusionSimilarity): The metric used to compare model outputs with human data. + _fitting_stimuli (StimulusSet): Stimulus set used for training or fitting the model. + _stimulus_set (StimulusSet): Stimulus set used for testing the model. + _human_assembly (DataAssembly): Human behavioral data for comparison. + _visual_degrees (int): The size of stimuli in visual degrees as presented to humans. + _number_of_trials (int): Number of trials to average over for the model predictions. + """ + + def __init__(self): + """ + Initializes the benchmark by setting up the necessary parameters. + """ + # Initialize the metric for evaluating confusion similarity + self._metric = load_metric('confusion_similarity') + + # Load training stimuli from the stimulus set registry + self._fitting_stimuli = load_stimulus_set('Maniquet2024-train') + + # Load testing stimuli from the stimulus set registry + self._stimulus_set = load_stimulus_set('Maniquet2024-test') + + # Load human behavioral data from the data registry + self._human_assembly = load_dataset('Maniquet2024') + + # Set the visual degrees to which the human data was exposed + self._visual_degrees = 8 + + # Set the number of trials to perform + self._number_of_trials = 1 + + # Call the parent class constructor to complete initialization + super(_Maniquet2024ConfusionSimilarity, self).__init__( + identifier="Maniquet2024-confusion_similarity'", + version=1, + ceiling_func=lambda: self._metric._ceiling(self._assembly), + parent="Maniquet2024", + bibtex=BIBTEX, + ) + + def __call__(self, candidate: BrainModel): + """ + Executes the benchmark by comparing the candidate model's confusion probabilities against human data. + + Args: + candidate (BrainModel): The model being evaluated. + + Returns: + float: The similarity score between the model and human data. 
+ """ + # Start the model on the task of predicting confusion probabilities + fitting_stimuli = place_on_screen(self._fitting_stimuli, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees) + candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli) + + # Prepare the stimulus set by placing it on a virtual screen at a scale appropriate for the model + stimulus_set = place_on_screen(self._stimulus_set, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees) + + # Model looks at the stimulus set and returns confusion probabilities + probabilities = candidate.look_at(stimulus_set, number_of_trials=self._number_of_trials) + + # Compute the confusion similarity score between model probabilities and human assembly data + # NOTE: score output here is *already* ceiling-normalized!! + score = self._metric(probabilities, self._human_assembly) + return score + + +class _Maniquet2024TasksConsistency(BenchmarkBase): + """ + A benchmarking class designed to evaluate the consistency of the human accuracy profiles across + all tasks with the model's accuracy profiles across the same tasks. + + Attributes: + _metric (TasksConsistency): The metric for evaluating task consistency between the model and human data. + _fitting_stimuli (StimulusSet): The set of stimuli used for model training or calibration. + _stimulus_set (StimulusSet): The set of stimuli used for testing the model's predictions. + _human_assembly (DataAssembly): The dataset containing human response data for comparison. + _visual_degrees (int): The visual size of the stimuli as perceived by human subjects. + _number_of_trials (int): The number of trials over which model predictions are averaged. + """ + + def __init__(self): + """ + Initializes the benchmark setup, including loading necessary datasets, defining the metric, and setting + up the parameters for the evaluation. + """ + # Metric for evaluating the consistency of task performance + self._metric = load_metric('tasks_consistency') + + # Load training stimuli from the stimulus set registry + self._fitting_stimuli = load_stimulus_set('Maniquet2024-train') + + # Load testing stimuli from the stimulus set registry + self._stimulus_set = load_stimulus_set('Maniquet2024-test') + + # Load human behavioral data from the data registry + self._human_assembly = load_dataset('Maniquet2024') + + # Set the visual context to match human study conditions + self._visual_degrees = 8 + + # Define the number of trials for model evaluation + self._number_of_trials = 1 + + # Initialize parent class with benchmark-specific metadata + super(_Maniquet2024TasksConsistency, self).__init__( + identifier="Maniquet2024-tasks_consistency", + version=1, + ceiling_func=lambda: self._metric.ceiling(self._human_assembly), + parent="Maniquet2024", + bibtex=BIBTEX, + ) + + def __call__(self, candidate: BrainModel): + """ + Executes the benchmark by comparing the candidate model's task performance probabilities + against human data, and returns a similarity score. + + Args: + candidate (BrainModel): The neural model being evaluated. + + Returns: + float: A similarity score indicating how closely the model's responses match human responses. 
+ """ + # Task the model with generating predictions based on the fitting stimuli + fitting_stimuli = place_on_screen( + self._fitting_stimuli, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees, + ) + candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli) + + # Adjust the stimulus presentation to match the model's expected input scale + stimulus_set = place_on_screen( + self._stimulus_set, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees, + ) + + # Obtain the model's predictions as confusion probabilities + probabilities = candidate.look_at( + stimulus_set, number_of_trials=self._number_of_trials + ) + + # Evaluate the consistency of model predictions with human data + # NOTE: score output here is *already* ceiling-normalized!! + score = self._metric(probabilities, self._human_assembly) + + return score + + +def Maniquet2024ConfusionSimilarity(): + return _Maniquet2024ConfusionSimilarity() + + +def Maniquet2024TasksConsistency(): + return _Maniquet2024TasksConsistency() diff --git a/brainscore_vision/benchmarks/maniquet2024/requirements.txt b/brainscore_vision/benchmarks/maniquet2024/requirements.txt new file mode 100644 index 000000000..f0f99cfe1 --- /dev/null +++ b/brainscore_vision/benchmarks/maniquet2024/requirements.txt @@ -0,0 +1,3 @@ +numpy +scikit-learn +scipy \ No newline at end of file diff --git a/brainscore_vision/benchmarks/maniquet2024/test.py b/brainscore_vision/benchmarks/maniquet2024/test.py new file mode 100644 index 000000000..c0d8012b3 --- /dev/null +++ b/brainscore_vision/benchmarks/maniquet2024/test.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Mon Jun 24 17:22:59 2024 + +@author: costantino_ai +""" + +import pytest +from brainscore_vision import load_benchmark + +@pytest.mark.parametrize('benchmark', [ + 'Maniquet2024-confusion_similarity', + 'Maniquet2024-tasks_consistency', +]) +def test_benchmark_registry(benchmark): + assert load_benchmark(benchmark) is not None diff --git a/brainscore_vision/data/maniquet2024/__init__.py b/brainscore_vision/data/maniquet2024/__init__.py new file mode 100644 index 000000000..3fc2bc3b3 --- /dev/null +++ b/brainscore_vision/data/maniquet2024/__init__.py @@ -0,0 +1,59 @@ +from brainio.assemblies import BehavioralAssembly +from brainscore_vision import stimulus_set_registry, data_registry +from brainscore_vision.data_helpers.s3 import ( + load_assembly_from_s3, + load_stimulus_set_from_s3, +) + +BIBTEX = """@article {Maniquet2024.04.02.587669, + author = {Maniquet, Tim and de Beeck, Hans Op and Costantino, Andrea Ivan}, + title = {Recurrent issues with deep neural network models of visual recognition}, + elocation-id = {2024.04.02.587669}, + year = {2024}, + doi = {10.1101/2024.04.02.587669}, + publisher = {Cold Spring Harbor Laboratory}, + URL = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669}, + eprint = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669.full.pdf}, + journal = {bioRxiv} +}""" + +# Human Stimulus Set +stimulus_set_registry["Maniquet2024"] = lambda: load_stimulus_set_from_s3( + identifier="Maniquet2024", + bucket="brainio-brainscore", + csv_sha1="ec61e1d7776a6c3b467fee862302edac8d4a156e", + zip_sha1="bbdaf09528974c4ca3ee4cddbc91e0e03351291f", + csv_version_id="HwA7hBw0KVt6O.S_eDTXCHjOxfXlK_N3", + zip_version_id="lDUmFncDxloQp_9.S3VcpiOIPa1sCr7N", +) + + +# DNN test Stimulus Set +stimulus_set_registry["Maniquet2024-test"] = lambda: 
load_stimulus_set_from_s3( + identifier="Maniquet2024-test", + bucket="brainio-brainscore", + csv_sha1="993089ba4aaeffbc61303acb2a5171a5fa271ea5", + zip_sha1="39f9aaf13fdd66d284bcea99f187bb0c065144e4", + csv_version_id="G8mwsgXbuaodl_icHRzA9_LK1LeF1mco", + zip_version_id="O05BqRf79q78oQJXcN.iPeeEwNSOF2iS", +) + +# DNN train Stimulus Set +stimulus_set_registry["Maniquet2024-train"] = lambda: load_stimulus_set_from_s3( + identifier="Maniquet2024-train", + bucket="brainio-brainscore", + csv_sha1="da965af3ae5ab6e49d46c28f682ef4b75d0a2045", + zip_sha1="6685effb52f6870175988c47892b3f9a916a0375", + csv_version_id="1y.4Een3cC_ju8lqOZcSeLTXxsoPq5Wg", + zip_version_id="WUCsCnvwUWVSLaioFsKXrxpOGdIMt8ij", +) + + +# Human Data Assembly (behavioural) +data_registry["Maniquet2024"] = lambda: load_assembly_from_s3( + identifier="Maniquet2024", + version_id="ppAs1vv02btHmfmUMtLejawBuA96Iv2j", + sha1="39b8b7b29fad080ebba6df8a46ac4426261342d5", + bucket="brainio-brainscore", + cls=BehavioralAssembly, +) diff --git a/brainscore_vision/data/maniquet2024/data_packaging.py b/brainscore_vision/data/maniquet2024/data_packaging.py new file mode 100644 index 000000000..c03f4739b --- /dev/null +++ b/brainscore_vision/data/maniquet2024/data_packaging.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sat Jun 22 00:24:30 2024 + +@author: costantino_ai +""" + +import os +import logging +from pathlib import Path +import pandas as pd +from brainio.assemblies import BehavioralAssembly +from brainio.stimuli import StimulusSet +from brainio.packaging import package_data_assembly, package_stimulus_set + +# Setup logging +logging.basicConfig( + level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" +) + +# Constants +ROOT_DIRECTORY = "./maniquet2024/private" +TAG = "Maniquet2024" + + +def load_stimulus_set(stimuli_directory, tag): + """ + Load and package stimuli from the specified directory. + + Args: + stimuli_directory (str): Directory containing stimulus files. + tag (str): Tag to assign to the stimulus set. + + Returns: + StimulusSet: Packaged set of stimuli with metadata. + """ + logging.info("Loading stimuli from directory: %s", stimuli_directory) + stimuli = [] + stimulus_paths = {} + + for filepath in Path(stimuli_directory).glob("*.png"): + stimulus_id = filepath.stem + parts = filepath.stem.split("_") + exemplar_number, manipulation, manipulation_details, category = ( + parts[0], + parts[1], + parts[2], + parts[3], + ) + + stimulus_paths[stimulus_id] = filepath + stimuli.append( + { + "stimulus_id": stimulus_id, + "manipulation": manipulation, + "manipulation_details": manipulation_details, + "image_label": category, + "exemplar_number": exemplar_number, + } + ) + + stimulus_set = StimulusSet(stimuli) + stimulus_set.stimulus_paths = stimulus_paths + stimulus_set.name = tag + logging.info("Total stimuli loaded: %d", len(stimulus_set)) + return stimulus_set + + +def load_behavioral_data(data_file, tag): + """ + Load and package experimental data from a CSV file. + + Args: + data_file (str): Path to the CSV file containing experimental data. + tag (str): Tag to assign to the behavioral data assembly. + + Returns: + BehavioralAssembly: Data assembly of behavioral responses. 
+ """ + logging.info("Loading behavioral data from file: %s", data_file) + df = pd.read_csv(data_file) + assembly = BehavioralAssembly( + df["acc"], + dims=["presentation"], + coords={ + "stimulus_id": ("presentation", df["stimulus_id"].values), + "manipulation": ("presentation", df["condition"].values), + "manipulation_details": ("presentation", df["task_details"].values), + "mask": ("presentation", df["mask"].values), + "image_label": ("presentation", df["category"].values), + "prediction": ("presentation", df["prediction"].values), + "response": ("presentation", df["response"].values), + "reaction_time": ("presentation", df["rt"].values), + "subject_id": ("presentation", df["subj"].values), + "task": ("presentation", df["task_long"].values), + }, + ) + assembly.name = tag + logging.info( + "Data assembly loaded with %d presentations", len(assembly["presentation"]) + ) + return assembly + + +def main(): + """ + Main function to package stimulus set and experimental data, and upload to S3. + """ + logging.info("Starting the data packaging process.") + + # Load stimuli from directories + human_stimuli_directory = os.path.join(ROOT_DIRECTORY, "human_stimuli") + dnntest_stimuli_directory = os.path.join(ROOT_DIRECTORY, "dnn_stimuli/test") + dnntrain_stimuli_directory = os.path.join(ROOT_DIRECTORY, "dnn_stimuli/train") + + human_stimulus_set = load_stimulus_set(human_stimuli_directory, TAG) + dnntest_stimulus_set = load_stimulus_set(dnntest_stimuli_directory, f"{TAG}-test") + dnntrain_stimulus_set = load_stimulus_set(dnntrain_stimuli_directory, f"{TAG}-train") + + # Upload stimuli + human_stimulus_meta = package_stimulus_set( + None, human_stimulus_set, human_stimulus_set.name, "brainio-brainscore" + ) + dnntest_stimulus_meta = package_stimulus_set( + None, dnntest_stimulus_set, dnntest_stimulus_set.name, "brainio-brainscore" + ) + dnntrain_stimulus_meta = package_stimulus_set( + None, dnntrain_stimulus_set, dnntrain_stimulus_set.name, "brainio-brainscore" + ) + + # Load human data assembly + data_file = os.path.join(ROOT_DIRECTORY, "data/human_data_andrea.csv") + data_assembly = load_behavioral_data(data_file, TAG) + assembly_meta = package_data_assembly( + None, + data_assembly, + data_assembly.name, + human_stimulus_set.name, + "BehavioralAssembly", + "brainio-brainscore", + ) + + # print(human_stimulus_meta) + # print(dnntest_stimulus_meta) + # print(dnntrain_stimulus_meta) + # print(assembly_meta) + + +if __name__ == "__main__": + main() diff --git a/brainscore_vision/data/maniquet2024/test.py b/brainscore_vision/data/maniquet2024/test.py new file mode 100644 index 000000000..85469c40a --- /dev/null +++ b/brainscore_vision/data/maniquet2024/test.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Jun 20 17:32:25 2024 + +@author: costantino_ai +""" +import pytest +from brainscore_vision import load_dataset, load_stimulus_set + +@pytest.mark.private_access +def test_existence(): + assert load_stimulus_set('Maniquet2024') is not None + assert load_dataset('Maniquet2024') is not None + + \ No newline at end of file diff --git a/brainscore_vision/metrics/maniquet2024_metrics/__init__.py b/brainscore_vision/metrics/maniquet2024_metrics/__init__.py new file mode 100644 index 000000000..19f9e2ea8 --- /dev/null +++ b/brainscore_vision/metrics/maniquet2024_metrics/__init__.py @@ -0,0 +1,19 @@ +from brainscore_vision import metric_registry +from .metric import ConfusionSimilarity, TasksConsistency + +BIBTEX = """@article {Maniquet2024.04.02.587669, + author = 
{Maniquet, Tim and de Beeck, Hans Op and Costantino, Andrea Ivan}, + title = {Recurrent issues with deep neural network models of visual recognition}, + elocation-id = {2024.04.02.587669}, + year = {2024}, + doi = {10.1101/2024.04.02.587669}, + publisher = {Cold Spring Harbor Laboratory}, + URL = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669}, + eprint = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669.full.pdf}, + journal = {bioRxiv} +}""" + +metric_registry['confusion_similarity'] = ConfusionSimilarity +metric_registry['tasks_consistency'] = TasksConsistency + + diff --git a/brainscore_vision/metrics/maniquet2024_metrics/metric.py b/brainscore_vision/metrics/maniquet2024_metrics/metric.py new file mode 100644 index 000000000..97a57733a --- /dev/null +++ b/brainscore_vision/metrics/maniquet2024_metrics/metric.py @@ -0,0 +1,443 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sun Jun 23 00:44:19 2024 + +@author: costantino_ai +""" +import itertools +import numpy as np +from sklearn.metrics import confusion_matrix +from scipy.stats import pearsonr +from brainscore_core.metrics import Metric, Score +from brainio.assemblies import BehavioralAssembly + + +class ConfusionSimilarity(Metric): + """ + A metric to compute the similarity between model-generated confusion matrices and human confusion data. + + Methods: + _extract_subjects(assembly): Extracts and sorts unique subject identifiers from the assembly. + _rollout_matrix(matrix, remove_diagonal=True): Flattens a matrix into a vector, optionally removing diagonal elements. + _label_from_probability(probabilities): Derives predicted labels from probabilities. + _accuracy(y_true, y_pred): Calculates the accuracy of predictions. + _ceiling(assembly, precomputed=True): Computes the ceiling performance by assessing the highest correlation across subjects. + __call__(probabilities, human_assembly): Computes the correlation between model and human confusion matrices normalized by the ceiling. + """ + + def _extract_subjects(self, assembly): + """ + Extracts and sorts unique subject identifiers from the assembly. + + Args: + assembly (xarray.Dataset): The data assembly containing subject IDs. + + Returns: + list: Sorted list of unique subject IDs. + """ + return list(sorted(set(assembly["subject_id"].values))) + + def _rollout_matrix(self, matrix, remove_diagonal=True): + """ + Flattens a matrix into a vector. Optionally removes diagonal elements to ignore self-comparison. + + Args: + matrix (np.array): A square matrix. + remove_diagonal (bool): Whether to remove the diagonal elements of the matrix. + + Returns: + np.array: The flattened matrix as a vector. + """ + if remove_diagonal: + # Create a mask to remove diagonal elements from the matrix. + mask = np.eye(matrix.shape[0], dtype=bool) + return matrix[~mask].ravel() + else: + return matrix.ravel() + + def _label_from_probability(self, probabilities): + """ + Derives predicted labels from probabilities by selecting the class with the highest probability. + + Args: + probabilities (xarray.Dataset): Dataset containing class probabilities. + + Returns: + tuple: Arrays of true labels and predicted labels. + """ + # Extract the class with the highest probability for each instance. 
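+        # Assumes the assembly is laid out presentation x choice, so the argmax over
+        # axis 1 picks the most probable label for each trial, while the ground-truth
+        # labels come from the stimulus metadata on the presentation dimension.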
+ classes = probabilities.choice.values + indices = np.argmax(probabilities.values, axis=1) + y_pred = classes[indices] + y_true = probabilities.image_label.values + return y_true, y_pred + + def _accuracy(self, y_true, y_pred): + """ + Calculates the accuracy of predictions. + + Args: + y_true (np.array): True labels. + y_pred (np.array): Predicted labels. + + Returns: + float: The accuracy of the predictions. + """ + return sum(y_true == y_pred) / len(y_pred) + + def _ceiling(self, assembly, precomputed=True): + """ + Compute the noise ceiling of a confusion matrix using split-half correlations. + + Args: + assembly: (Human) Assembly with expected columns 'predicted'and 'image_label'. + precomputed (Bool): If true, use precomputed ceiling measure to save time. + + Returns: + score (float): Noise ceiling average. + """ + if precomputed: + # This is to save quite a lot of time. It was precomputed on the Maniquet2024 + # human data assembly, which includes 218 participants tested on the + # Maniquet2024 stimulus set. + return 0.54007 + + # Get labels and subjects lists + labels = list(set(assembly.image_label.values)) + subjects = self._extract_subjects(assembly) + + # Start recording correlation scores + correlation_scores = [] + for subject in subjects: + + # Select data from a single subject + subj_df = assembly.sel(subject_id=subject) + + # Split it in two randomly + n_rows = int(np.round(len(subj_df) / 2)) + half = np.random.randint(0, len(subj_df), size=n_rows) + part_one, part_two = subj_df[half], subj_df[~half] + + # Compute confusion matrix for each half + cm_one = confusion_matrix( + y_true=part_one["image_label"], + y_pred=part_one["prediction"], + labels=labels, + ) + cm_two = confusion_matrix( + y_true=part_two["image_label"], + y_pred=part_two["prediction"], + labels=labels, + ) + + # Compute Pearson correlation between the two confusion matrices. + correlation_score = pearsonr( + self._rollout_matrix(cm_one), + self._rollout_matrix(cm_two), + )[0] + correlation_scores.append(correlation_score) + + # Average correlations as a measure of reliability + ceiling = np.mean(correlation_scores) + + return ceiling + + def __call__( + self, probabilities: BehavioralAssembly, human_assembly: BehavioralAssembly + ) -> Score: + """ + Computes the correlation between model and human confusion matrices normalized by the ceiling. + + Args: + probabilities (BehavioralAssembly): Model's predicted probabilities. + human_assembly (BehavioralAssembly): Human baseline responses. + + Returns: + Score: The normalized correlation score as a performance metric. + """ + assert sorted(set(probabilities.choice.values)) == sorted( + set(human_assembly.image_label.values) + ) + + # Extract labels from the model probabilities. + y_true, y_pred = self._label_from_probability(probabilities) + + # Calculate the model's confusion matrix. + dnn_confmat = confusion_matrix( + y_true=y_true, y_pred=y_pred, labels=probabilities.choice.values + ) + + # Calculate the human confusion matrix. + human_confmat = confusion_matrix( + y_true=human_assembly["image_label"], + y_pred=human_assembly["prediction"], + labels=probabilities.choice.values, + ) + + # Compute the Pearson correlation between the model and human confusion matrices. 
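+        # _rollout_matrix drops the diagonal by default, so the correlation is driven
+        # by the pattern of errors (which categories get confused with which) rather
+        # than by overall accuracy; the raw r is then normalized by the split-half
+        # human ceiling below.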
+ correlation_score = pearsonr( + self._rollout_matrix(human_confmat), self._rollout_matrix(dnn_confmat) + )[0] + ceiling = self._ceiling(human_assembly, precomputed=True) + + # Normalize by ceiling + score = Score(correlation_score / ceiling) + score.attrs["raw"] = correlation_score + score.attrs["ceiling"] = ceiling + + return score + + +class TasksConsistency(Metric): + """ + A metric to compute the consistency between model and human accuracy profiles across different tasks. + + Methods: + _extract_subjects(assembly): Extracts and sorts unique subject identifiers from the assembly. + _extract_tasks(assembly): Extracts and sorts unique task identifiers from the assembly. + _rollout_matrix(matrix, remove_diagonal=True): Flattens a matrix into a vector, optionally removing diagonal elements. + _label_from_probability(probabilities): Derives predicted labels from probabilities. + _accuracy(y_true, y_pred): Calculates the accuracy of predictions. + _ceiling(assembly, precomputed=True): Computes the ceiling performance by assessing the highest correlation across subjects. + _map_human_to_dnn_categories(human_task): Maps a human task name to the corresponding DNN categories of 'manipulation' and 'manipulation_details'. + __call__(probabilities, human_assembly): Computes the correlation between model and human confusion matrices normalized by the ceiling. + """ + + def _extract_subjects(self, assembly): + """ + Extracts and sorts unique subject identifiers from the assembly. + + Args: + assembly (xarray.Dataset): The data assembly containing subject IDs. + + Returns: + list: Sorted list of unique subject IDs. + """ + return list(sorted(set(assembly["subject_id"].values))) + + def _extract_tasks(self, assembly): + """ + Extracts and sorts unique task identifiers from the assembly. + + Args: + assembly (xarray.Dataset): The data assembly containing task IDs. + + Returns: + list: Sorted list of unique task IDs. + """ + return list(sorted(set(assembly["task"].values))) + + def _rollout_matrix(self, matrix, remove_diagonal=True): + """ + Flattens a matrix into a vector. Optionally removes diagonal elements to ignore self-comparison. + + Args: + matrix (np.array): A square matrix. + remove_diagonal (bool): Whether to remove the diagonal elements of the matrix. + + Returns: + np.array: The flattened matrix as a vector. + """ + if remove_diagonal: + # Create a mask to remove diagonal elements from the matrix. + mask = np.eye(matrix.shape[0], dtype=bool) + return matrix[~mask].ravel() + else: + return matrix.ravel() + + def _label_from_probability(self, probabilities): + """ + Derives predicted labels from probabilities by selecting the class with the highest probability. + + Args: + probabilities (xarray.Dataset): Dataset containing class probabilities. + + Returns: + tuple: Arrays of true labels and predicted labels. + """ + # Extract the class with the highest probability for each instance. + classes = probabilities.choice.values + indices = np.argmax(probabilities.values, axis=1) + y_pred = classes[indices] + y_true = probabilities.image_label.values + return y_true, y_pred + + def _accuracy(self, y_true, y_pred): + """ + Calculates the accuracy of predictions. + + Args: + y_true (np.array): True labels. + y_pred (np.array): Predicted labels. + + Returns: + float: The accuracy of the predictions. 
+ """ + return sum(y_true == y_pred) / len(y_pred) + + def _ceiling(self, assembly, precomputed=True): + """ + Computes the ceiling performance by assessing the average split-half correlation across subjects. + + Args: + assembly (xarray.Dataset): The data assembly containing subject data. + precomputed (bool): Whether to use precomputed ceiling value. + + Returns: + Score: The average correlation score across all subject pairs. + """ + if precomputed: + # This precomputed value is based on the Maniquet2024 human data assembly, + # which includes 218 participants tested on the Maniquet2024 stimulus set. + return 0.99810 + + # Initialize an empty list to store correlations for each iteration + iter_task_correlations = [] + + # Perform 50 iterations for split-half correlation + for i in range(50): + + # Randomly split the data assembly into two halves + n_rows = int(np.round(len(assembly) / 2)) + half = np.random.randint(0, len(assembly), size=n_rows) + part_one, part_two = assembly[half], assembly[~half] + + # Extract performance vectors for each half across all tasks + perf_vec_one = [ + float(np.mean(part_one[part_one["task"] == task])) + for task in self.human_tasks + ] + perf_vec_two = [ + float(np.mean(part_two.loc[part_two["task"] == task])) + for task in self.human_tasks + ] + + # Calculate the Pearson correlation between the performance vectors of the two halves + corr_perf = pearsonr(perf_vec_one, perf_vec_two)[0] + + # Append the correlation result to the list for this iteration + iter_task_correlations.append(corr_perf) + + return np.mean(iter_task_correlations) + + def _map_human_to_dnn_categories(self, human_task): + """ + Maps a human task name to the corresponding DNN categories of 'manipulation' and 'manipulation_details'. + + Args: + human_task (str): A task name from the human tasks list. + + Returns: + tuple: A tuple where the first element is the 'manipulation' and the second is 'manipulation_details'. 
+ """ + # Mapping based on the provided details + manipulation_mapping = { + "clutter": "clutter", + "control": "control", + "occlusion": "occluder", + "scrambling": "phasescrambling", + } + + detail_mapping = { + "heavy": "heavy", + "light": "light", + "highpass": "highpass", + "lowpass": "lowpass", + "few_large_blobs_high": "fewlarge-high", + "few_large_blobs_low": "fewlarge-low", + "few_large_deletion_high": "fewlarge-high", + "few_large_deletion_low": "fewlarge-low", + "many_small_blobs_high": "manysmall-high", + "many_small_blobs_low": "manysmall-low", + "many_small_deletion_high": "manysmall-high", + "many_small_deletion_low": "manysmall-low", + "few_large_partial_viewing_high": "fewlarge-high", + "few_large_partial_viewing_low": "fewlarge-low", + "many_small_partial_viewing_high": "manysmall-high", + "many_small_partial_viewing_low": "manysmall-low", + } + + parts = human_task.split("_") + if "control" in parts: + # Handle control separately as it doesn't fit other patterns + return ("control", "control") + + # Determine manipulation by first relevant keyword + manipulation = next( + (manipulation_mapping[key] for key in manipulation_mapping if key in parts), + None, + ) + + # Construct a detail key from remaining parts excluding known manipulation keys + detail_parts = [part for part in parts if part not in manipulation_mapping] + detail_key = "_".join(detail_parts) + + # Find the matching manipulation detail + manipulation_detail = detail_mapping.get( + detail_key, "control" + ) # Default to control if no match found + + return (manipulation, manipulation_detail) + + def __call__( + self, probabilities: BehavioralAssembly, human_assembly: BehavioralAssembly + ) -> Score: + """ + Computes the correlation between model and human accuracy profiles across tasks, normalized by the ceiling. + + Args: + probabilities (BehavioralAssembly): Model's predicted probabilities. + human_assembly (BehavioralAssembly): Human baseline responses. + + Returns: + Score: The normalized correlation score as a performance metric. + """ + assert sorted(set(probabilities.choice.values)) == sorted( + set(human_assembly.image_label.values) + ) + + # Get list of tasks + self.human_tasks = self._extract_tasks(human_assembly) + + # Store accuracies + dnn_accs = [] + human_accs = [] + + # Calculate the model's accuracy across tasks. + for human_task in self.human_tasks: + # Convert the human task into DNN lingo + manipulation, manipulation_details = self._map_human_to_dnn_categories( + human_task + ) + + # Extract labels from the model probabilities. + probabilities_filtered = probabilities[ + (probabilities["manipulation"] == manipulation) + & (probabilities["manipulation_details"] == manipulation_details) + ] + + dnn_y_true, dnn_y_pred = self._label_from_probability(probabilities_filtered) + dnn_acc = self._accuracy(dnn_y_true, dnn_y_pred) + dnn_accs.append(dnn_acc) + + # Extract labels from the human responses. + human_responses_filtered = human_assembly[ + human_assembly["task"] == human_task + ] + human_acc = self._accuracy( + human_responses_filtered["image_label"], + human_responses_filtered["prediction"], + ) + human_accs.append(human_acc) + + # Compute the Pearson correlation between the model and human accuracy profiles. 
+ correlation_score = pearsonr(dnn_accs, human_accs)[0] + ceiling = self._ceiling(human_assembly, precomputed=True) + + # Normalize by ceiling + score = Score(correlation_score / ceiling) + score.attrs["raw"] = correlation_score + score.attrs["ceiling"] = ceiling + + return score diff --git a/brainscore_vision/metrics/maniquet2024_metrics/requirements.txt b/brainscore_vision/metrics/maniquet2024_metrics/requirements.txt new file mode 100644 index 000000000..595f57d71 --- /dev/null +++ b/brainscore_vision/metrics/maniquet2024_metrics/requirements.txt @@ -0,0 +1,3 @@ +numpy +sklearn +scipy \ No newline at end of file diff --git a/brainscore_vision/metrics/maniquet2024_metrics/test.py b/brainscore_vision/metrics/maniquet2024_metrics/test.py new file mode 100644 index 000000000..496156fc7 --- /dev/null +++ b/brainscore_vision/metrics/maniquet2024_metrics/test.py @@ -0,0 +1,8 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sun Jun 23 13:40:53 2024 + +@author: costantino_ai +""" + From 63d92b9afcf3a04cd4d49eab9b08aa1444e7f5ed Mon Sep 17 00:00:00 2001 From: Michael Ferguson Date: Sat, 6 Jul 2024 08:49:17 -0400 Subject: [PATCH 46/68] Add baker2022 benchmark (competition) (#999) * add data plugin * add metric and benchmark plugin * benchmark now produces scores * benchmark tests complete * metric tests complete * removed unused requirements.txt --------- Co-authored-by: Martin Schrimpf --- .../benchmarks/baker2022/__init__.py | 9 ++ .../benchmarks/baker2022/benchmark.py | 124 ++++++++++++++++ .../benchmarks/baker2022/requirements.txt | 1 + .../benchmarks/baker2022/test.py | 88 ++++++++++++ brainscore_vision/data/baker2022/__init__.py | 40 ++++++ .../inverted_distortion_data_assembly.py | 43 ++++++ .../inverted_distortion_stimulus_set.py | 81 +++++++++++ .../data/baker2022/data_packaging/mapping.py | 60 ++++++++ .../normal_distortion_data_assembly.py | 46 ++++++ .../normal_distortion_stimulus_set.py | 94 ++++++++++++ brainscore_vision/data/baker2022/test.py | 135 ++++++++++++++++++ .../metrics/baker_accuracy_delta/__init__.py | 4 + .../metrics/baker_accuracy_delta/metric.py | 95 ++++++++++++ .../baker_accuracy_delta/requirements.txt | 1 + .../metrics/baker_accuracy_delta/test.py | 1 + 15 files changed, 822 insertions(+) create mode 100644 brainscore_vision/benchmarks/baker2022/__init__.py create mode 100644 brainscore_vision/benchmarks/baker2022/benchmark.py create mode 100644 brainscore_vision/benchmarks/baker2022/requirements.txt create mode 100644 brainscore_vision/benchmarks/baker2022/test.py create mode 100644 brainscore_vision/data/baker2022/__init__.py create mode 100644 brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py create mode 100644 brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py create mode 100644 brainscore_vision/data/baker2022/data_packaging/mapping.py create mode 100644 brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py create mode 100644 brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py create mode 100644 brainscore_vision/data/baker2022/test.py create mode 100644 brainscore_vision/metrics/baker_accuracy_delta/__init__.py create mode 100644 brainscore_vision/metrics/baker_accuracy_delta/metric.py create mode 100644 brainscore_vision/metrics/baker_accuracy_delta/requirements.txt create mode 100644 brainscore_vision/metrics/baker_accuracy_delta/test.py diff --git a/brainscore_vision/benchmarks/baker2022/__init__.py 
b/brainscore_vision/benchmarks/baker2022/__init__.py new file mode 100644 index 000000000..5bbad98d1 --- /dev/null +++ b/brainscore_vision/benchmarks/baker2022/__init__.py @@ -0,0 +1,9 @@ +from brainscore_vision import benchmark_registry +from brainscore_vision.benchmarks.baker2022.benchmark import Baker2022AccuracyDeltaFrankenstein, \ + Baker2022AccuracyDeltaFragmented, Baker2022InvertedAccuracyDelta + +DATASETS = ['normal', 'inverted'] + +benchmark_registry['Baker2022-accuracy_delta_frankenstein'] = lambda: Baker2022AccuracyDeltaFrankenstein() +benchmark_registry['Baker2022-accuracy_delta_fragmented'] = lambda: Baker2022AccuracyDeltaFragmented() +benchmark_registry['Baker2022-inverted_accuracy_delta'] = lambda: Baker2022InvertedAccuracyDelta() diff --git a/brainscore_vision/benchmarks/baker2022/benchmark.py b/brainscore_vision/benchmarks/baker2022/benchmark.py new file mode 100644 index 000000000..0473cc5a9 --- /dev/null +++ b/brainscore_vision/benchmarks/baker2022/benchmark.py @@ -0,0 +1,124 @@ +import numpy as np +import numpy.random +from brainscore_vision.benchmarks import BenchmarkBase +from brainscore_vision.benchmark_helpers.screen import place_on_screen +from brainscore_vision.metrics import Score +from brainscore_vision.metrics.baker_accuracy_delta.metric import BakerAccuracyDelta, compute_ceiling +from brainscore_vision.model_interface import BrainModel +from brainscore_vision.utils import LazyLoad +from typing import List +from brainio.assemblies import DataAssembly +from brainscore_vision import load_dataset + +BIBTEX = """@article{BAKER2022104913, + title = {Deep learning models fail to capture the configural nature of human shape perception}, + journal = {iScience}, + volume = {25}, + number = {9}, + pages = {104913}, + year = {2022}, + issn = {2589-0042}, + doi = {https://doi.org/10.1016/j.isci.2022.104913}, + url = {https://www.sciencedirect.com/science/article/pii/S2589004222011853}, + author = {Nicholas Baker and James H. Elder}, + keywords = {Biological sciences, Neuroscience, Sensory neuroscience}, + abstract = {Summary + A hallmark of human object perception is sensitivity to the holistic configuration of the local shape features of an object. Deep convolutional neural networks (DCNNs) are currently the dominant models for object recognition processing in the visual cortex, but do they capture this configural sensitivity? To answer this question, we employed a dataset of animal silhouettes and created a variant of this dataset that disrupts the configuration of each object while preserving local features. While human performance was impacted by this manipulation, DCNN performance was not, indicating insensitivity to object configuration. Modifications to training and architecture to make networks more brain-like did not lead to configural processing, and none of the networks were able to accurately predict trial-by-trial human object judgements. We speculate that to match human configural sensitivity, networks must be trained to solve a broader range of object tasks beyond category recognition.} + }""" + +DATASETS = ['normal', 'inverted'] + + +class _Baker2022AccuracyDelta(BenchmarkBase): + def __init__(self, dataset: str, image_types: List[str]): + """ + :param dataset: orientation of stimuli. 
Either 'normal' or 'inverted' + :param image_types: Either ["w", "f"] for frankenstein delta or ["w", "o"] for fragmented delta + """ + self._metric = BakerAccuracyDelta(image_types=image_types) + self.image_types = image_types + self.orientation = dataset + self._ceiling = SplitHalvesConsistencyBaker(num_splits=100, split_coordinate="subject", + image_types=self.image_types) + self._assembly = LazyLoad(lambda: load_assembly(dataset)) + self._visual_degrees = 8.8 + self._number_of_trials = 1 + + super(_Baker2022AccuracyDelta, self).__init__( + identifier=f'Baker2022{dataset}-accuracy_delta', version=1, + ceiling_func=lambda: self._ceiling(assembly=self._assembly), + parent='Baker2022', + bibtex=BIBTEX) + + def __call__(self, candidate: BrainModel): + choice_labels = set(self._assembly['truth'].values) + choice_labels = list(sorted(choice_labels)) + candidate.start_task(BrainModel.Task.label, choice_labels) + stimulus_set = place_on_screen(self._assembly.stimulus_set, target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees) + stimuli = stimulus_set[stimulus_set["orientation"] == self.orientation] + labels = candidate.look_at(stimuli, number_of_trials=self._number_of_trials) + assembly = self._assembly[self._assembly["orientation"] == self.orientation] + raw_score = self._metric(labels, assembly) + ceiling = self._ceiling(assembly) + score = raw_score / ceiling + + # cap score at 1 if ceiled score > 1 + if score[(score['aggregation'] == 'center')] > 1: + score.__setitem__({'aggregation': score['aggregation'] == 'center'}, 1) + + score.attrs['raw'] = raw_score + score.attrs['ceiling'] = ceiling + return score + + +def Baker2022AccuracyDeltaFrankenstein(): + return _Baker2022AccuracyDelta(dataset='normal', image_types=["w", "f"]) + + +def Baker2022AccuracyDeltaFragmented(): + return _Baker2022AccuracyDelta(dataset='normal', image_types=["w", "o"]) + + +def Baker2022InvertedAccuracyDelta(): + return _Baker2022AccuracyDelta(dataset='inverted', image_types=["w", "f"]) + + +def load_assembly(dataset): + assembly = load_dataset(f'Baker2022_{dataset}_distortion') + return assembly + + +# ceiling method: +class SplitHalvesConsistencyBaker: + def __init__(self, num_splits: int, split_coordinate: str, image_types): + """ + :param num_splits: how many times to create two halves + :param split_coordinate: over which coordinate to split the assembly into halves + """ + self.num_splits = num_splits + self.split_coordinate = split_coordinate + self.image_types = image_types + + def __call__(self, assembly: DataAssembly) -> Score: + consistencies, uncorrected_consistencies = [], [] + splits = range(self.num_splits) + random_state = np.random.RandomState(0) + for _ in splits: + num_subjects = len(set(assembly["subject"].values)) + half1_subjects = random_state.choice(range(1, num_subjects), (num_subjects // 2), replace=False) + half1 = assembly[ + {'presentation': [subject in half1_subjects for subject in assembly['subject'].values]}] + half2 = assembly[ + {'presentation': [subject not in half1_subjects for subject in assembly['subject'].values]}] + consistency = compute_ceiling(half1, half2, self.image_types) + uncorrected_consistencies.append(consistency) + # Spearman-Brown correction for sub-sampling + corrected_consistency = 2 * consistency / (1 + (2 - 1) * consistency) + consistencies.append(corrected_consistency) + consistencies = Score(consistencies, coords={'split': splits}, dims=['split']) + uncorrected_consistencies = Score(uncorrected_consistencies, 
coords={'split': splits}, dims=['split'])
+        average_consistency = consistencies.median('split')
+        average_consistency.attrs['raw'] = consistencies
+        average_consistency.attrs['uncorrected_consistencies'] = uncorrected_consistencies
+        return average_consistency
diff --git a/brainscore_vision/benchmarks/baker2022/requirements.txt b/brainscore_vision/benchmarks/baker2022/requirements.txt
new file mode 100644
index 000000000..296d65452
--- /dev/null
+++ b/brainscore_vision/benchmarks/baker2022/requirements.txt
@@ -0,0 +1 @@
+numpy
\ No newline at end of file
diff --git a/brainscore_vision/benchmarks/baker2022/test.py b/brainscore_vision/benchmarks/baker2022/test.py
new file mode 100644
index 000000000..6b58d06c2
--- /dev/null
+++ b/brainscore_vision/benchmarks/baker2022/test.py
@@ -0,0 +1,88 @@
+from pathlib import Path
+import pytest
+from pytest import approx
+from brainio.assemblies import BehavioralAssembly
+from brainscore_vision import benchmark_registry, load_benchmark
+from brainscore_vision.benchmarks.baker2022 import DATASETS
+from brainscore_vision.benchmark_helpers import PrecomputedFeatures
+from brainscore_vision.data_helpers import s3
+
+
+@pytest.mark.private_access
+class TestBaker2022:
+
+    # ensure normal and inverted datasets are there
+    def test_count(self):
+        assert len(DATASETS) == 2
+
+    # ensure the three benchmarks themselves are there
+    @pytest.mark.parametrize('benchmark', [
+        'Baker2022-accuracy_delta_frankenstein',
+        'Baker2022-accuracy_delta_fragmented',
+        'Baker2022-inverted_accuracy_delta'
+    ])
+    def test_in_pool(self, benchmark):
+        assert benchmark in benchmark_registry
+
+    # Test expected ceiling
+    @pytest.mark.parametrize('benchmark, expected_ceiling', [
+        ('Baker2022-accuracy_delta_frankenstein', 0.8498),
+        ('Baker2022-accuracy_delta_fragmented', 0.9385),
+        ('Baker2022-inverted_accuracy_delta', 0.6538),
+    ])
+    def test_benchmark_ceiling(self, benchmark, expected_ceiling):
+        benchmark = load_benchmark(benchmark)
+        assembly = benchmark._assembly
+        if "inverted" in benchmark.identifier:
+            inverted_assembly = assembly[assembly["orientation"] == "inverted"]
+            ceiling = benchmark._ceiling(inverted_assembly)
+        else:
+            ceiling = benchmark._ceiling(assembly)
+        assert ceiling == approx(expected_ceiling, abs=0.001)
+
+    # Test raw scores
+    @pytest.mark.parametrize('benchmark, model, expected_raw_score', [
+        ('Baker2022-accuracy_delta_frankenstein', 'resnet-50-pytorch', approx(0.2847, abs=0.0001)),
+        ('Baker2022-accuracy_delta_fragmented', 'resnet-50-pytorch', approx(0.8452, abs=0.0001)),
+        ('Baker2022-inverted_accuracy_delta', 'resnet-50-pytorch', approx(0.0, abs=0.0001)),
+        ('Baker2022-accuracy_delta_frankenstein', 'resnet50-SIN', approx(0.6823, abs=0.0001)),
+        ('Baker2022-accuracy_delta_fragmented', 'resnet50-SIN', approx(0.9100, abs=0.0001)),
+        ('Baker2022-inverted_accuracy_delta', 'resnet50-SIN', approx(0.7050, abs=0.0001)),
+    ])
+    def test_model_raw_score(self, benchmark, model, expected_raw_score):
+
+        benchmark_object = load_benchmark(benchmark)
+        filename = f"{model}-{benchmark}.nc"
+        precomputed_features = Path(__file__).parent / filename
+        s3.download_file_if_not_exists(precomputed_features,
+                                       bucket='brainscore-vision',
+                                       remote_filepath=f'benchmarks/Baker2022/{filename}')
+        precomputed_features = BehavioralAssembly.from_files(file_path=precomputed_features)
+        precomputed_features = PrecomputedFeatures(precomputed_features, visual_degrees=8)
+        score = benchmark_object(precomputed_features)
+        raw_score = score.raw
+
+        # division by ceiling <= 1 should result in
higher score + assert score.sel(aggregation='center') >= raw_score.sel(aggregation='center') + assert raw_score.sel(aggregation='center') == expected_raw_score + + # test ceiled score + @pytest.mark.parametrize('benchmark, model, expected_ceiled_score', [ + ('Baker2022-accuracy_delta_frankenstein', 'resnet-50-pytorch', approx(0.3350, abs=0.0001)), + ('Baker2022-accuracy_delta_fragmented', 'resnet-50-pytorch', approx(0.9005, abs=0.0001)), + ('Baker2022-inverted_accuracy_delta', 'resnet-50-pytorch', approx(0.0, abs=0.0001)), + ('Baker2022-accuracy_delta_frankenstein', 'resnet50-SIN', approx(0.8029, abs=0.0001)), + ('Baker2022-accuracy_delta_fragmented', 'resnet50-SIN', approx(0.9696, abs=0.0001)), + ('Baker2022-inverted_accuracy_delta', 'resnet50-SIN', approx(1.000, abs=0.0001)), + ]) + def test_model_ceiled_score(self, benchmark, model, expected_ceiled_score): + benchmark_object = load_benchmark(benchmark) + filename = f"{model}-{benchmark}.nc" + precomputed_features = Path(__file__).parent / filename + s3.download_file_if_not_exists(precomputed_features, + bucket='brainscore-vision', + remote_filepath=f'benchmarks/Baker2022/{filename}') + precomputed_features = BehavioralAssembly.from_files(file_path=precomputed_features) + precomputed_features = PrecomputedFeatures(precomputed_features, visual_degrees=8) + score = benchmark_object(precomputed_features) + assert score.sel(aggregation='center') == expected_ceiled_score \ No newline at end of file diff --git a/brainscore_vision/data/baker2022/__init__.py b/brainscore_vision/data/baker2022/__init__.py new file mode 100644 index 000000000..cc7d8b8f1 --- /dev/null +++ b/brainscore_vision/data/baker2022/__init__.py @@ -0,0 +1,40 @@ +from brainio.assemblies import BehavioralAssembly +from brainscore_vision import data_registry, stimulus_set_registry, load_stimulus_set +from brainscore_vision.data_helpers.s3 import load_assembly_from_s3, load_stimulus_set_from_s3 + + +# normal distortion: +stimulus_set_registry['Baker2022_normal_distortion'] = lambda: load_stimulus_set_from_s3( + identifier='Baker2022_normal_distortion', + bucket="brainio-brainscore", + csv_sha1="17d4db7458a29a787d12bb29c34e91daef1872bf", + zip_sha1="2c726abaf081c8a9828269a559222f8c6eea0e4f", + csv_version_id="upenpJUf5t_sZpuqmQOkARfnDx1ITvi0", + zip_version_id="GO0K_jHqUMWVYcV2DoM4ka.HHv.KzrSf") + +data_registry['Baker2022_normal_distortion'] = lambda: load_assembly_from_s3( + identifier='Baker2022_normal_distortion', + version_id="7HACUgvwlGp_mIWb62BFzh8JlMwtDxpo", + sha1="46c79a48bf2dbd995a9305d8fbc03a134a852e17", + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Baker2022_normal_distortion'), +) + +# inverted distortion: +stimulus_set_registry['Baker2022_inverted_distortion'] = lambda: load_stimulus_set_from_s3( + identifier='Baker2022_inverted_distortion', + bucket="brainio-brainscore", + csv_sha1="91e452e4651024c2b382694edfcbc7bdc6c3189b", + zip_sha1="4740a096af994c2232350469c664e53796f17a05", + csv_version_id="yiKAKACwcvMygzX27DI3BSG5h599Eji_", + zip_version_id="ZAmZEaRp0S2txTdXjn3LBY_vcFXaALPr") + +data_registry['Baker2022_inverted_distortion'] = lambda: load_assembly_from_s3( + identifier='Baker2022_inverted_distortion', + version_id="7MV4uklu3pL0I9LB2xlHWvlnOjG7Flbv", + sha1="b76fb57b25a58ca68db78d188fd0a783e1dcaf73", + bucket="brainio-brainscore", + cls=BehavioralAssembly, + stimulus_set_loader=lambda: load_stimulus_set('Baker2022_inverted_distortion'), +) \ No newline at end of file diff --git 
a/brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py b/brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py new file mode 100644 index 000000000..a8953b021 --- /dev/null +++ b/brainscore_vision/data/baker2022/data_packaging/inverted_distortion_data_assembly.py @@ -0,0 +1,43 @@ +import numpy as np + +from brainio.assemblies import BehavioralAssembly +from brainio.packaging import package_data_assembly +import pandas as pd + +''' +Experiment Information: +https://www.sciencedirect.com/science/article/pii/S2589004222011853#sec9 + - (Data/subjects are from experiment 1 in paper above) + + - 32 subjects + - 40 images for each condition (whole, fragmented, frankenstein) = 120 images/subject shown + - 120 * 32 = 3840 total images shown + - However there are only 3706 trials total, for data issues (according to Baker) + - 9-way AFC, from set: {bear, bunny, cat, elephant, frog, lizard, tiger, turtle, wolf} + +''' + +# initial csv to dataframe processing: +all_subjects = pd.read_csv('human_data/inverted.csv') + + +# construct the assembly +assembly = BehavioralAssembly(all_subjects['RSP'], + coords={ + 'stimulus_id': ('presentation', all_subjects['FileName']), + 'subject': ('presentation', all_subjects['Subj']), + 'orientation': ('presentation', all_subjects['Inv']), + 'condition': ('presentation', all_subjects['Config']), + 'truth': ('presentation', all_subjects['Animal']), + 'correct': ('presentation', all_subjects['RSP']), + }, + dims=['presentation'] + ) + +# give the assembly an identifier name +assembly.name = 'Baker2022_inverted_distortion' + +# upload to S3 +package_data_assembly('brainio_brainscore', assembly, assembly_identifier=assembly.name, + stimulus_set_identifier='Baker2022_inverted_distortion', + assembly_class_name="BehavioralAssembly", bucket_name="brainio-brainscore") diff --git a/brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py b/brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py new file mode 100644 index 000000000..ecccd02e5 --- /dev/null +++ b/brainscore_vision/data/baker2022/data_packaging/inverted_distortion_stimulus_set.py @@ -0,0 +1,81 @@ +from pathlib import Path +from brainio.stimuli import StimulusSet +from brainio.packaging import package_stimulus_set +import pandas as pd + +stimuli = [] +image_paths = {} +stimuli_directory = 'images' + + +''' +Dataset Information: +- From Baker 2022: https://www.sciencedirect.com/science/article/pii/S2589004222011853#sec9 +- 4320 trials total, across 12 subjects. Each subject saw the same 360 images, composed of: + - 90 inverted-whole, 90 inverted-frankenstein, 90-normal frankenstein, 90-normal whole. + - of those 4 subtypes, they are further broken into the same 10 images of the set of 9 categories below. + +Fields: +1) ground_truth: the base object, in set above +2) image_type: a string in the set {w, f} for {whole, frankenstein} respectively. 
+    Note: inverted only has whole and frankenstein images, no fragmented
+3) image_number: a number {1,2...40} indicates the image variation
+
+'''
+categories = ['bear', 'bunny', 'cat', 'elephant', 'frog', 'lizard', 'tiger', 'turtle', 'wolf']
+images_actually_shown = pd.read_csv('human_data/inverted.csv')
+images_actually_shown = set(images_actually_shown["FileName"].values)
+
+for filepath in Path(stimuli_directory).glob('*.jpg'):
+
+    # entire name of image file:
+    image_id = filepath.stem
+
+    if image_id in images_actually_shown:
+        import re
+        match = re.match(r"([a-z]+)([0-9]+)", image_id, re.I)
+        if match:
+            items = match.groups()
+        else:
+            items = ["", ""]
+
+        # ground truth
+        ground_truth = items[0]
+
+        # image_number:
+        image_number = items[1]
+
+        # parse the needed image type letter
+        if ground_truth in categories:
+            image_type = "w"
+        else:
+            image_type = ground_truth[0]
+            ground_truth = ground_truth[1:]
+
+        if "inv" in ground_truth:
+            ground_truth = ground_truth.replace("inv", "")
+        elif "nv" in ground_truth:
+            ground_truth = ground_truth.replace("nv", "")
+
+        image_paths[image_id] = filepath
+        stimuli.append({
+            'stimulus_id': image_id,
+            'animal': ground_truth,
+            'image_type': "w" if image_type == "i" else image_type,
+            'image_type_entire': "whole" if image_type == "w" else "frankenstein",
+            'image_number': image_number,
+            "orientation": "normal" if "inv" not in image_id else "inverted",
+        })
+    else:
+        pass
+
+stimuli = StimulusSet(stimuli)
+stimuli.stimulus_paths = image_paths
+
+
+# give the StimulusSet an identifier name
+stimuli.name = 'Baker2022_inverted_distortion'
+
+# upload to S3
+package_stimulus_set("brainio_brainscore", stimuli, stimulus_set_identifier=stimuli.name,
+                     bucket_name="brainio-brainscore")
diff --git a/brainscore_vision/data/baker2022/data_packaging/mapping.py b/brainscore_vision/data/baker2022/data_packaging/mapping.py
new file mode 100644
index 000000000..f5f4c871a
--- /dev/null
+++ b/brainscore_vision/data/baker2022/data_packaging/mapping.py
@@ -0,0 +1,60 @@
+import pandas as pd
+import re
+
+human_data = pd.read_csv('human_data_normal/human_data.csv')
+images_shown = pd.read_csv('human_data_normal/images_shown.csv')
+human_data["image_shown"] = "FILL"
+
+columns = {"Participants 1-7": [1,2,3,4,5,6,7],"Participants 8-13": [8,9,10,11,12,13],
+           "Participants 14-19": [14,15,16,17,18,19],"Participants 20-24": [20,21,22,23,24],
+           "Participants 25-28": [25,26,27,28,29], "Participants 29-32": [29,30,31,32]}
+categories = ['bear', 'bunny', 'cat', 'elephant', 'frog', 'lizard', 'tiger', 'turtle', 'wolf']
+mapping = {"o": "fragmented", "w": "whole", "f": "Frankenstein"}
+
+def get_parts(image_id):
+    match = re.match(r"([a-z]+)([0-9]+)", image_id, re.I)
+    if match:
+        items = match.groups()
+    else:
+        items = ["", ""]
+    # ground truth
+    ground_truth = items[0]
+    return ground_truth
+
+
+for i in range(0, len(human_data)):
+    subject_number = human_data["Subj"][i]
+    column_to_look_at = ""
+    for header in columns:
+        if subject_number in columns[header]:
+            column_to_look_at = header
+            break
+    condition = human_data["Frankensteinonfig"][i]
+    animal = human_data["Animal"][i]
+
+    # clear seen every subject
+    if i % 32 == 0:
+        seen = []
+
+    for image_name in images_shown[column_to_look_at]:
+
+        ground_truth = get_parts(image_name)
+        if ground_truth in categories:
+            image_type = "w"
+        else:
+            image_type = ground_truth[0]
+            ground_truth = ground_truth[1:]
+        image_type_entire = mapping[image_type]
+        if animal in image_name:
+            if condition == image_type_entire and image_name
not in seen: + human_data["image_shown"][i] = image_name.replace(".jpg", "") + seen.append(image_name) + break + +human_data.to_csv('human_data_normal/human_data.csv') + + + + + + diff --git a/brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py b/brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py new file mode 100644 index 000000000..15e275729 --- /dev/null +++ b/brainscore_vision/data/baker2022/data_packaging/normal_distortion_data_assembly.py @@ -0,0 +1,46 @@ +import numpy as np + +from brainio.assemblies import BehavioralAssembly +from brainio.packaging import package_data_assembly +import pandas as pd + +''' +Experiment Information: + - https://www.sciencedirect.com/science/article/pii/S2589004222011853#sec9 + - Data/subjects are from experiment 1 in paper above + + - 32 subjects + - 40 images for each condition (whole, fragmented, frankenstein) = 120 images/subject shown + - 120 * 32 = 3840 total images shown + - However there are only 3706 trials total, for data issues (according to Baker) + - 9-way AFC, from set: {bear, bunny, cat, elephant, frog, lizard, tiger, turtle, wolf} + +''' + +# initial csv to dataframe processing: +all_subjects = pd.read_csv('human_data/human_data.csv') + +# all images presented in this assembly are normal (non-inverted) +all_subjects["orientation"] = "normal" + +# construct the assembly +assembly = BehavioralAssembly(all_subjects['Correct?'], + coords={ + 'stimulus_id': ('presentation', all_subjects['image_shown']), + 'subject': ('presentation', all_subjects['Subj']), + 'condition': ('presentation', all_subjects['Frankensteinonfig']), + 'truth': ('presentation', all_subjects['Animal']), + 'correct': ('presentation', all_subjects['Correct?']), + 'orientation': ('presentation', all_subjects['orientation']), + }, + dims=['presentation'] + ) + +# give the assembly an identifier name +assembly.name = 'Baker2022_normal_distortion' + + +# upload to S3 +package_data_assembly('brainio_brainscore', assembly, assembly_identifier=assembly.name, + stimulus_set_identifier='Baker2022_normal_distortion', + assembly_class_name="BehavioralAssembly", bucket_name="brainio-brainscore") diff --git a/brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py b/brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py new file mode 100644 index 000000000..c3d66a75f --- /dev/null +++ b/brainscore_vision/data/baker2022/data_packaging/normal_distortion_stimulus_set.py @@ -0,0 +1,94 @@ +from pathlib import Path +from brainio.stimuli import StimulusSet +from brainio.packaging import package_stimulus_set +import pandas as pd +import re + +stimuli = [] +image_paths = {} +stimuli_directory = 'images' + + +''' +Dataset Information: + +- From Baker 2022: https://www.sciencedirect.com/science/article/pii/S2589004222011853#sec9 +- 1800 images total: 1080 normal distortion (** this stimulus set **), 720 inverted distortion +- normal/inverted distortion -> contains the three classes of distortion: + 1) normal image + 2) fragmented image + 3) frankenstein image + +- For the normal distortion, there are 40 images/condition, with 9 categories: + categories are: {bear, bunny, cat, elephant, frog, lizard, tiger, turtle, wolf} +- For inverted distortion, there are a variable number of images/condition, but 720 total unique images + +Fields: + +1) ground_truth: the base object, in set above +2) image_type: a string in the set {w, f, o} for {whole, frankenstein, fragmented} respectively. 
+3) image_number: a number {1,2...40} indicates the image variation + +''' +categories = ['bear', 'bunny', 'cat', 'elephant', 'frog', 'lizard', 'tiger', 'turtle', 'wolf'] +images_actually_shown = pd.read_csv('human_data/images_shown.csv') +images_actually_shown = pd.concat([images_actually_shown[col] for col in images_actually_shown]).values + +for filepath in Path(stimuli_directory).glob('*.jpg'): + + # entire name of image file: + image_id = filepath.stem + + if f"{image_id}.jpg" in images_actually_shown: + match = re.match(r"([a-z]+)([0-9]+)", image_id, re.I) + if match: + items = match.groups() + else: + items = ["", ""] + + # ground truth + ground_truth = items[0] + + # image_number: + image_number = items[1] + + # parse the needed image type letter + if ground_truth in categories: + image_type = "w" + else: + image_type = ground_truth[0] + ground_truth = ground_truth[1:] + + if "inv" in ground_truth: + ground_truth = ground_truth.replace("inv", "") + elif "nv" in ground_truth: + ground_truth = ground_truth.replace("nv", "") + + if image_type is "w": + image_type_entire = "whole" + elif image_type is "o": + image_type_entire = "fragmented" + else: + image_type_entire = "frankenstein" + + image_paths[image_id] = filepath + stimuli.append({ + 'stimulus_id': image_id, + 'animal': ground_truth, + 'image_type': "w" if image_type is "i" else image_type, + 'image_type_entire': image_type_entire, + 'image_number': image_number, + "orientation": "normal" if "inv" not in image_id else "inverted", + }) + +stimuli = StimulusSet(stimuli) + +stimuli.stimulus_paths = image_paths + +# remove all inverted stimuli +stimuli = stimuli[stimuli["orientation"] == "normal"] +stimuli.name = 'Baker2022_normal_distortion' # give the StimulusSet an identifier name + +# upload to S3 +package_stimulus_set("brainio_brainscore", stimuli, stimulus_set_identifier=stimuli.name, + bucket_name="brainio-brainscore") diff --git a/brainscore_vision/data/baker2022/test.py b/brainscore_vision/data/baker2022/test.py new file mode 100644 index 000000000..e96a1da31 --- /dev/null +++ b/brainscore_vision/data/baker2022/test.py @@ -0,0 +1,135 @@ +import pytest +from brainscore_vision import load_dataset, load_stimulus_set +import numpy as np + + +@pytest.mark.private_access +class TestBaker2022Stimuli: + + # general tests + @pytest.mark.parametrize('identifier', [ + 'normal', + 'inverted' + ]) + def test_stimulus_set_exist(self, identifier): + full_name = f'Baker2022_{identifier}_distortion' + stimulus_set = load_stimulus_set(full_name) + assert stimulus_set is not None + assert stimulus_set.identifier == full_name + + # tests number of images + @pytest.mark.parametrize('identifier, num_images', [ + ('normal', 716), + ('inverted', 360), + ]) + def test_num_stimuli(self, identifier, num_images): + stimulus_set = load_stimulus_set(f'Baker2022_{identifier}_distortion') + assert len(stimulus_set) == num_images + assert len(np.unique(stimulus_set["stimulus_id"])) == num_images + + # tests stimulus_set coords. Ensure normal/inverted only have their respective stimuli + @pytest.mark.parametrize('field', [ + 'stimulus_id', + 'animal', + 'image_type', + 'image_number', + "orientation", + ]) + @pytest.mark.parametrize('identifier', [ + 'normal', + 'inverted', + ]) + def test_fields_present(self, identifier, field): + stimulus_set = load_stimulus_set(f'Baker2022_{identifier}_distortion') + assert hasattr(stimulus_set, field) + + # make sure there are at least whole, frankenstein in each stimulus_set. Inverted does not have fragmented stimuli. 
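+    # Hedged sketch of what the parametrized check below amounts to (identifiers assumed
+    # from the dataset description): the normal set exposes image_type codes {"w", "f", "o"}
+    # (whole, frankenstein, fragmented), the inverted set only {"w", "f"}, e.g.
+    #   set(load_stimulus_set('Baker2022_inverted_distortion')["image_type"]) == {"w", "f"}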
+ @pytest.mark.parametrize('identifier, count', [ + ('normal', 3), + ('inverted', 2), + ]) + def test_distortion_counts(self, identifier, count): + stimulus_set = load_stimulus_set(f'Baker2022_{identifier}_distortion') + assert len(np.unique(stimulus_set["image_type"])) == count + assert "w" in set(stimulus_set["image_type"]) + assert "f" in set(stimulus_set["image_type"]) + + # make sure there are 9 possible animals in each stimulus_set -> 9 way AFC + @pytest.mark.parametrize('identifier', [ + 'normal', + 'inverted', + ]) + def test_ground_truth_types(self, identifier): + stimulus_set = load_stimulus_set(f'Baker2022_{identifier}_distortion') + assert len(np.unique(stimulus_set["animal"])) == 9 + + # make sure there are 40 unique image numbers + @pytest.mark.parametrize('identifier', [ + 'normal', + 'inverted', + ]) + def test_image_types(self, identifier): + stimulus_set = load_stimulus_set(f'Baker2022_{identifier}_distortion') + assert len(np.unique(stimulus_set["image_number"])) == 40 + + + +@pytest.mark.private_access +class TestBaker2022Assemblies: + + # tests alignments that are the same across normal and inverted assemblies + @pytest.mark.parametrize('identifier, length', [ + ('normal', 3702), + ('inverted', 4320), + ]) + def test_stimulus_set_assembly_alignment(self, identifier, length): + full_name = f'Baker2022_{identifier}_distortion' + assembly = load_dataset(full_name) + assert assembly.stimulus_set is not None + assert assembly.stimulus_set.identifier == f'Baker2022_{identifier}_distortion' + assert set(assembly.stimulus_set["animal"]) == set(assembly["truth"].values) + assert set(assembly.stimulus_set["stimulus_id"]) == set(assembly["stimulus_id"].values) + assert len(assembly.presentation) == length + + # tests counts that are the same across normal and inverted assemblies + @pytest.mark.parametrize('identifier', [ + 'normal', + 'inverted', + ]) + def test_same_counts(self, identifier): + full_name = f'Baker2022_{identifier}_distortion' + assembly = load_dataset(full_name) + assert len(set((assembly["truth"]).values)) == 9 + assert set((assembly["correct"]).values) == {0, 1} + + # tests number of subjects + @pytest.mark.parametrize('identifier, num_subjects', [ + ('normal', 32), + ('inverted', 12), + ]) + def test_subjects(self, identifier, num_subjects): + full_name = f'Baker2022_{identifier}_distortion' + assembly = load_dataset(full_name) + assert len(set((assembly["subject"]).values)) == num_subjects + + # tests number of configurations in {whole, fragmented, frankenstein} Inverted does not have fragmented) + @pytest.mark.parametrize('identifier, conditions', [ + ('normal', {'whole', 'fragmented', 'Frankenstein'}), + ('inverted', {'whole', 'Frankenstein'}), + ]) + def test_conditions(self, identifier, conditions): + full_name = f'Baker2022_{identifier}_distortion' + assembly = load_dataset(full_name) + assert set((assembly["condition"]).values) == conditions + + # tests number of unique images + @pytest.mark.parametrize('identifier, num_images', [ + ('normal', 716), + ('inverted', 360), + ]) + def test_subjects(self, identifier, num_images): + full_name = f'Baker2022_{identifier}_distortion' + assembly = load_dataset(full_name) + assert len(set((assembly["stimulus_id"]).values)) == num_images + + diff --git a/brainscore_vision/metrics/baker_accuracy_delta/__init__.py b/brainscore_vision/metrics/baker_accuracy_delta/__init__.py new file mode 100644 index 000000000..80afb8fd5 --- /dev/null +++ b/brainscore_vision/metrics/baker_accuracy_delta/__init__.py @@ -0,0 +1,4 @@ 
+from brainscore_vision import metric_registry
+from .metric import BakerAccuracyDelta
+
+metric_registry['baker_accuracy_delta'] = BakerAccuracyDelta
diff --git a/brainscore_vision/metrics/baker_accuracy_delta/metric.py b/brainscore_vision/metrics/baker_accuracy_delta/metric.py
new file mode 100644
index 000000000..7f460010f
--- /dev/null
+++ b/brainscore_vision/metrics/baker_accuracy_delta/metric.py
@@ -0,0 +1,95 @@
+import numpy as np
+
+from brainscore_core import Metric, Score
+from typing import List
+from brainio.assemblies import BehavioralAssembly
+
+
+# controls how many half-splits are averaged together to get human delta.
+HUMAN_SPLITS = 100
+
+
+class BakerAccuracyDelta(Metric):
+    def __init__(self, image_types: list):
+        self.image_types = image_types
+
+    def __call__(self, source: BehavioralAssembly, target: BehavioralAssembly):
+        model_delta = get_model_delta(source, self.image_types)
+        scores = []
+
+        # calculate score over average of 100 sub splits of human delta
+        for i in range(HUMAN_SPLITS):
+
+            # grab one half of the subjects
+            random_state = np.random.RandomState(i)
+            num_subjects = len(set(target["subject"].values))
+            half1_subjects = random_state.choice(range(1, num_subjects), (num_subjects // 2), replace=False)
+            half1 = target[{'presentation': [subject in half1_subjects for subject in target['subject'].values]}]
+
+            human_delta = get_human_delta(half1, self.image_types)
+            score = max((1 - ((np.abs(human_delta - model_delta)) / human_delta)), 0)
+            scores.append(score)
+        score = np.mean(scores)
+        error = np.std(scores)
+
+        score = Score([score, error], coords={'aggregation': ['center', 'error']}, dims=('aggregation',))
+        score.attrs['raw'] = scores
+        return score
+
+
+def extract_subjects(assembly):
+    return list(sorted(set(assembly['subject'].values)))
+
+
+def get_human_delta(target, image_types):
+    # calculate human accuracies for [whole, condition]
+    condition_scores_human = {}
+
+    # for whole condition, and other condition (frankenstein or fragmented)
+    for image_type in image_types:
+        scores = []
+
+        # get per subject accuracy
+        for subject in extract_subjects(target):
+            this_target = target.sel(image_type=image_type, subject=subject)
+            correct_count = np.count_nonzero(this_target.values)
+            accuracy = correct_count / len(this_target)
+            scores.append(accuracy)
+        condition_scores_human[image_type] = scores
+
+    # calculate the per-subject accuracy delta (whole minus condition) for this half of subjects:
+    condition = image_types[1]
+    delta_vector = [a_i - b_i for a_i, b_i in zip(condition_scores_human["w"], condition_scores_human[condition])]
+
+    # return mean of delta vector.
This is equal to the mean of half of subject deltas (random half) + return np.mean(delta_vector) + + +def get_model_delta(source, image_types): + condition_scores_model = [] + + # for whole condition, and other condition (frank or frag) + for image_type in image_types: + + # raw network accuracy per category + scores = [] + for category in sorted(set(source['animal'].values)): + this_source = source.sel(animal=category, image_type=image_type) + correct_count = (this_source.values == category).sum() + accuracy = correct_count / len(this_source[0]) + scores.append(accuracy) + + # overall accuracy, averaged over 9 categories + overall = np.mean(scores) + condition_scores_model.append(overall) + + # return difference between whole and condition + model_delta = condition_scores_model[0] - condition_scores_model[1] + return model_delta + + +def compute_ceiling(source: BehavioralAssembly, target: BehavioralAssembly, image_types: List[str]) -> float: + half_1_delta = get_human_delta(target=source, image_types=image_types) + half_2_delta = get_human_delta(target=target, image_types=image_types) + ceiling = max((1 - ((np.abs(half_1_delta - half_2_delta)) / half_2_delta)), 0) + return ceiling diff --git a/brainscore_vision/metrics/baker_accuracy_delta/requirements.txt b/brainscore_vision/metrics/baker_accuracy_delta/requirements.txt new file mode 100644 index 000000000..296d65452 --- /dev/null +++ b/brainscore_vision/metrics/baker_accuracy_delta/requirements.txt @@ -0,0 +1 @@ +numpy \ No newline at end of file diff --git a/brainscore_vision/metrics/baker_accuracy_delta/test.py b/brainscore_vision/metrics/baker_accuracy_delta/test.py new file mode 100644 index 000000000..b100d2ec1 --- /dev/null +++ b/brainscore_vision/metrics/baker_accuracy_delta/test.py @@ -0,0 +1 @@ +# tests apart of /benchmarks/baker2022/test.py, omitted here From 3650c9b505daaf9c8fabadbaae17b3ff8f6409d6 Mon Sep 17 00:00:00 2001 From: Andrea Costantino <59078281+costantinoai@users.noreply.github.com> Date: Sun, 7 Jul 2024 10:26:08 +0200 Subject: [PATCH 47/68] Remove Maniquet2024 (#1012) * Delete brainscore_vision/benchmarks/maniquet2024 directory * Delete brainscore_vision/metrics/maniquet2024_metrics directory * Delete brainscore_vision/data/maniquet2024 directory --- .../benchmarks/maniquet2024/__init__.py | 6 - .../benchmarks/maniquet2024/benchmark.py | 192 -------- .../benchmarks/maniquet2024/requirements.txt | 3 - .../benchmarks/maniquet2024/test.py | 17 - .../data/maniquet2024/__init__.py | 59 --- .../data/maniquet2024/data_packaging.py | 151 ------ brainscore_vision/data/maniquet2024/test.py | 16 - .../metrics/maniquet2024_metrics/__init__.py | 19 - .../metrics/maniquet2024_metrics/metric.py | 443 ------------------ .../maniquet2024_metrics/requirements.txt | 3 - .../metrics/maniquet2024_metrics/test.py | 8 - 11 files changed, 917 deletions(-) delete mode 100644 brainscore_vision/benchmarks/maniquet2024/__init__.py delete mode 100644 brainscore_vision/benchmarks/maniquet2024/benchmark.py delete mode 100644 brainscore_vision/benchmarks/maniquet2024/requirements.txt delete mode 100644 brainscore_vision/benchmarks/maniquet2024/test.py delete mode 100644 brainscore_vision/data/maniquet2024/__init__.py delete mode 100644 brainscore_vision/data/maniquet2024/data_packaging.py delete mode 100644 brainscore_vision/data/maniquet2024/test.py delete mode 100644 brainscore_vision/metrics/maniquet2024_metrics/__init__.py delete mode 100644 brainscore_vision/metrics/maniquet2024_metrics/metric.py delete mode 100644 
brainscore_vision/metrics/maniquet2024_metrics/requirements.txt delete mode 100644 brainscore_vision/metrics/maniquet2024_metrics/test.py diff --git a/brainscore_vision/benchmarks/maniquet2024/__init__.py b/brainscore_vision/benchmarks/maniquet2024/__init__.py deleted file mode 100644 index acc15b632..000000000 --- a/brainscore_vision/benchmarks/maniquet2024/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from brainscore_vision import benchmark_registry -from .benchmark import Maniquet2024ConfusionSimilarity, Maniquet2024TasksConsistency - -benchmark_registry['Maniquet2024-confusion_similarity'] = lambda: Maniquet2024ConfusionSimilarity() -benchmark_registry['Maniquet2024-tasks_consistency'] = lambda: Maniquet2024TasksConsistency() - diff --git a/brainscore_vision/benchmarks/maniquet2024/benchmark.py b/brainscore_vision/benchmarks/maniquet2024/benchmark.py deleted file mode 100644 index 762087e5b..000000000 --- a/brainscore_vision/benchmarks/maniquet2024/benchmark.py +++ /dev/null @@ -1,192 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Jun 21 23:15:17 2024 - -@author: costantino_ai -""" -from brainscore_vision.benchmarks import BenchmarkBase -from brainscore_vision.benchmark_helpers.screen import place_on_screen -from brainscore_vision.model_interface import BrainModel -from brainscore_vision import load_stimulus_set, load_metric, load_dataset -from brainscore_vision.utils import LazyLoad - - -BIBTEX = """@article {Maniquet2024.04.02.587669, - author = {Maniquet, Tim and de Beeck, Hans Op and Costantino, Andrea Ivan}, - title = {Recurrent issues with deep neural network models of visual recognition}, - elocation-id = {2024.04.02.587669}, - year = {2024}, - doi = {10.1101/2024.04.02.587669}, - publisher = {Cold Spring Harbor Laboratory}, - URL = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669}, - eprint = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669.full.pdf}, - journal = {bioRxiv} -}""" - - -class _Maniquet2024ConfusionSimilarity(BenchmarkBase): - """ - A benchmark class to measure the similarity between model-generated confusion probabilities - and human confusion data in visual tasks, specifically designed for the Maniquet2024 dataset. - - Attributes: - _metric (ConfusionSimilarity): The metric used to compare model outputs with human data. - _fitting_stimuli (StimulusSet): Stimulus set used for training or fitting the model. - _stimulus_set (StimulusSet): Stimulus set used for testing the model. - _human_assembly (DataAssembly): Human behavioral data for comparison. - _visual_degrees (int): The size of stimuli in visual degrees as presented to humans. - _number_of_trials (int): Number of trials to average over for the model predictions. - """ - - def __init__(self): - """ - Initializes the benchmark by setting up the necessary parameters. 
- """ - # Initialize the metric for evaluating confusion similarity - self._metric = load_metric('confusion_similarity') - - # Load training stimuli from the stimulus set registry - self._fitting_stimuli = load_stimulus_set('Maniquet2024-train') - - # Load testing stimuli from the stimulus set registry - self._stimulus_set = load_stimulus_set('Maniquet2024-test') - - # Load human behavioral data from the data registry - self._human_assembly = load_dataset('Maniquet2024') - - # Set the visual degrees to which the human data was exposed - self._visual_degrees = 8 - - # Set the number of trials to perform - self._number_of_trials = 1 - - # Call the parent class constructor to complete initialization - super(_Maniquet2024ConfusionSimilarity, self).__init__( - identifier="Maniquet2024-confusion_similarity'", - version=1, - ceiling_func=lambda: self._metric._ceiling(self._assembly), - parent="Maniquet2024", - bibtex=BIBTEX, - ) - - def __call__(self, candidate: BrainModel): - """ - Executes the benchmark by comparing the candidate model's confusion probabilities against human data. - - Args: - candidate (BrainModel): The model being evaluated. - - Returns: - float: The similarity score between the model and human data. - """ - # Start the model on the task of predicting confusion probabilities - fitting_stimuli = place_on_screen(self._fitting_stimuli, - target_visual_degrees=candidate.visual_degrees(), - source_visual_degrees=self._visual_degrees) - candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli) - - # Prepare the stimulus set by placing it on a virtual screen at a scale appropriate for the model - stimulus_set = place_on_screen(self._stimulus_set, - target_visual_degrees=candidate.visual_degrees(), - source_visual_degrees=self._visual_degrees) - - # Model looks at the stimulus set and returns confusion probabilities - probabilities = candidate.look_at(stimulus_set, number_of_trials=self._number_of_trials) - - # Compute the confusion similarity score between model probabilities and human assembly data - # NOTE: score output here is *already* ceiling-normalized!! - score = self._metric(probabilities, self._human_assembly) - return score - - -class _Maniquet2024TasksConsistency(BenchmarkBase): - """ - A benchmarking class designed to evaluate the consistency of the human accuracy profiles across - all tasks with the model's accuracy profiles across the same tasks. - - Attributes: - _metric (TasksConsistency): The metric for evaluating task consistency between the model and human data. - _fitting_stimuli (StimulusSet): The set of stimuli used for model training or calibration. - _stimulus_set (StimulusSet): The set of stimuli used for testing the model's predictions. - _human_assembly (DataAssembly): The dataset containing human response data for comparison. - _visual_degrees (int): The visual size of the stimuli as perceived by human subjects. - _number_of_trials (int): The number of trials over which model predictions are averaged. - """ - - def __init__(self): - """ - Initializes the benchmark setup, including loading necessary datasets, defining the metric, and setting - up the parameters for the evaluation. 
- """ - # Metric for evaluating the consistency of task performance - self._metric = load_metric('tasks_consistency') - - # Load training stimuli from the stimulus set registry - self._fitting_stimuli = load_stimulus_set('Maniquet2024-train') - - # Load testing stimuli from the stimulus set registry - self._stimulus_set = load_stimulus_set('Maniquet2024-test') - - # Load human behavioral data from the data registry - self._human_assembly = load_dataset('Maniquet2024') - - # Set the visual context to match human study conditions - self._visual_degrees = 8 - - # Define the number of trials for model evaluation - self._number_of_trials = 1 - - # Initialize parent class with benchmark-specific metadata - super(_Maniquet2024TasksConsistency, self).__init__( - identifier="Maniquet2024-tasks_consistency", - version=1, - ceiling_func=lambda: self._metric.ceiling(self._human_assembly), - parent="Maniquet2024", - bibtex=BIBTEX, - ) - - def __call__(self, candidate: BrainModel): - """ - Executes the benchmark by comparing the candidate model's task performance probabilities - against human data, and returns a similarity score. - - Args: - candidate (BrainModel): The neural model being evaluated. - - Returns: - float: A similarity score indicating how closely the model's responses match human responses. - """ - # Task the model with generating predictions based on the fitting stimuli - fitting_stimuli = place_on_screen( - self._fitting_stimuli, - target_visual_degrees=candidate.visual_degrees(), - source_visual_degrees=self._visual_degrees, - ) - candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli) - - # Adjust the stimulus presentation to match the model's expected input scale - stimulus_set = place_on_screen( - self._stimulus_set, - target_visual_degrees=candidate.visual_degrees(), - source_visual_degrees=self._visual_degrees, - ) - - # Obtain the model's predictions as confusion probabilities - probabilities = candidate.look_at( - stimulus_set, number_of_trials=self._number_of_trials - ) - - # Evaluate the consistency of model predictions with human data - # NOTE: score output here is *already* ceiling-normalized!! 
- score = self._metric(probabilities, self._human_assembly) - - return score - - -def Maniquet2024ConfusionSimilarity(): - return _Maniquet2024ConfusionSimilarity() - - -def Maniquet2024TasksConsistency(): - return _Maniquet2024TasksConsistency() diff --git a/brainscore_vision/benchmarks/maniquet2024/requirements.txt b/brainscore_vision/benchmarks/maniquet2024/requirements.txt deleted file mode 100644 index f0f99cfe1..000000000 --- a/brainscore_vision/benchmarks/maniquet2024/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -numpy -scikit-learn -scipy \ No newline at end of file diff --git a/brainscore_vision/benchmarks/maniquet2024/test.py b/brainscore_vision/benchmarks/maniquet2024/test.py deleted file mode 100644 index c0d8012b3..000000000 --- a/brainscore_vision/benchmarks/maniquet2024/test.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Mon Jun 24 17:22:59 2024 - -@author: costantino_ai -""" - -import pytest -from brainscore_vision import load_benchmark - -@pytest.mark.parametrize('benchmark', [ - 'Maniquet2024-confusion_similarity', - 'Maniquet2024-tasks_consistency', -]) -def test_benchmark_registry(benchmark): - assert load_benchmark(benchmark) is not None diff --git a/brainscore_vision/data/maniquet2024/__init__.py b/brainscore_vision/data/maniquet2024/__init__.py deleted file mode 100644 index 3fc2bc3b3..000000000 --- a/brainscore_vision/data/maniquet2024/__init__.py +++ /dev/null @@ -1,59 +0,0 @@ -from brainio.assemblies import BehavioralAssembly -from brainscore_vision import stimulus_set_registry, data_registry -from brainscore_vision.data_helpers.s3 import ( - load_assembly_from_s3, - load_stimulus_set_from_s3, -) - -BIBTEX = """@article {Maniquet2024.04.02.587669, - author = {Maniquet, Tim and de Beeck, Hans Op and Costantino, Andrea Ivan}, - title = {Recurrent issues with deep neural network models of visual recognition}, - elocation-id = {2024.04.02.587669}, - year = {2024}, - doi = {10.1101/2024.04.02.587669}, - publisher = {Cold Spring Harbor Laboratory}, - URL = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669}, - eprint = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669.full.pdf}, - journal = {bioRxiv} -}""" - -# Human Stimulus Set -stimulus_set_registry["Maniquet2024"] = lambda: load_stimulus_set_from_s3( - identifier="Maniquet2024", - bucket="brainio-brainscore", - csv_sha1="ec61e1d7776a6c3b467fee862302edac8d4a156e", - zip_sha1="bbdaf09528974c4ca3ee4cddbc91e0e03351291f", - csv_version_id="HwA7hBw0KVt6O.S_eDTXCHjOxfXlK_N3", - zip_version_id="lDUmFncDxloQp_9.S3VcpiOIPa1sCr7N", -) - - -# DNN test Stimulus Set -stimulus_set_registry["Maniquet2024-test"] = lambda: load_stimulus_set_from_s3( - identifier="Maniquet2024-test", - bucket="brainio-brainscore", - csv_sha1="993089ba4aaeffbc61303acb2a5171a5fa271ea5", - zip_sha1="39f9aaf13fdd66d284bcea99f187bb0c065144e4", - csv_version_id="G8mwsgXbuaodl_icHRzA9_LK1LeF1mco", - zip_version_id="O05BqRf79q78oQJXcN.iPeeEwNSOF2iS", -) - -# DNN train Stimulus Set -stimulus_set_registry["Maniquet2024-train"] = lambda: load_stimulus_set_from_s3( - identifier="Maniquet2024-train", - bucket="brainio-brainscore", - csv_sha1="da965af3ae5ab6e49d46c28f682ef4b75d0a2045", - zip_sha1="6685effb52f6870175988c47892b3f9a916a0375", - csv_version_id="1y.4Een3cC_ju8lqOZcSeLTXxsoPq5Wg", - zip_version_id="WUCsCnvwUWVSLaioFsKXrxpOGdIMt8ij", -) - - -# Human Data Assembly (behavioural) -data_registry["Maniquet2024"] = lambda: load_assembly_from_s3( - 
identifier="Maniquet2024", - version_id="ppAs1vv02btHmfmUMtLejawBuA96Iv2j", - sha1="39b8b7b29fad080ebba6df8a46ac4426261342d5", - bucket="brainio-brainscore", - cls=BehavioralAssembly, -) diff --git a/brainscore_vision/data/maniquet2024/data_packaging.py b/brainscore_vision/data/maniquet2024/data_packaging.py deleted file mode 100644 index c03f4739b..000000000 --- a/brainscore_vision/data/maniquet2024/data_packaging.py +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Sat Jun 22 00:24:30 2024 - -@author: costantino_ai -""" - -import os -import logging -from pathlib import Path -import pandas as pd -from brainio.assemblies import BehavioralAssembly -from brainio.stimuli import StimulusSet -from brainio.packaging import package_data_assembly, package_stimulus_set - -# Setup logging -logging.basicConfig( - level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" -) - -# Constants -ROOT_DIRECTORY = "./maniquet2024/private" -TAG = "Maniquet2024" - - -def load_stimulus_set(stimuli_directory, tag): - """ - Load and package stimuli from the specified directory. - - Args: - stimuli_directory (str): Directory containing stimulus files. - tag (str): Tag to assign to the stimulus set. - - Returns: - StimulusSet: Packaged set of stimuli with metadata. - """ - logging.info("Loading stimuli from directory: %s", stimuli_directory) - stimuli = [] - stimulus_paths = {} - - for filepath in Path(stimuli_directory).glob("*.png"): - stimulus_id = filepath.stem - parts = filepath.stem.split("_") - exemplar_number, manipulation, manipulation_details, category = ( - parts[0], - parts[1], - parts[2], - parts[3], - ) - - stimulus_paths[stimulus_id] = filepath - stimuli.append( - { - "stimulus_id": stimulus_id, - "manipulation": manipulation, - "manipulation_details": manipulation_details, - "image_label": category, - "exemplar_number": exemplar_number, - } - ) - - stimulus_set = StimulusSet(stimuli) - stimulus_set.stimulus_paths = stimulus_paths - stimulus_set.name = tag - logging.info("Total stimuli loaded: %d", len(stimulus_set)) - return stimulus_set - - -def load_behavioral_data(data_file, tag): - """ - Load and package experimental data from a CSV file. - - Args: - data_file (str): Path to the CSV file containing experimental data. - tag (str): Tag to assign to the behavioral data assembly. - - Returns: - BehavioralAssembly: Data assembly of behavioral responses. - """ - logging.info("Loading behavioral data from file: %s", data_file) - df = pd.read_csv(data_file) - assembly = BehavioralAssembly( - df["acc"], - dims=["presentation"], - coords={ - "stimulus_id": ("presentation", df["stimulus_id"].values), - "manipulation": ("presentation", df["condition"].values), - "manipulation_details": ("presentation", df["task_details"].values), - "mask": ("presentation", df["mask"].values), - "image_label": ("presentation", df["category"].values), - "prediction": ("presentation", df["prediction"].values), - "response": ("presentation", df["response"].values), - "reaction_time": ("presentation", df["rt"].values), - "subject_id": ("presentation", df["subj"].values), - "task": ("presentation", df["task_long"].values), - }, - ) - assembly.name = tag - logging.info( - "Data assembly loaded with %d presentations", len(assembly["presentation"]) - ) - return assembly - - -def main(): - """ - Main function to package stimulus set and experimental data, and upload to S3. 
- """ - logging.info("Starting the data packaging process.") - - # Load stimuli from directories - human_stimuli_directory = os.path.join(ROOT_DIRECTORY, "human_stimuli") - dnntest_stimuli_directory = os.path.join(ROOT_DIRECTORY, "dnn_stimuli/test") - dnntrain_stimuli_directory = os.path.join(ROOT_DIRECTORY, "dnn_stimuli/train") - - human_stimulus_set = load_stimulus_set(human_stimuli_directory, TAG) - dnntest_stimulus_set = load_stimulus_set(dnntest_stimuli_directory, f"{TAG}-test") - dnntrain_stimulus_set = load_stimulus_set(dnntrain_stimuli_directory, f"{TAG}-train") - - # Upload stimuli - human_stimulus_meta = package_stimulus_set( - None, human_stimulus_set, human_stimulus_set.name, "brainio-brainscore" - ) - dnntest_stimulus_meta = package_stimulus_set( - None, dnntest_stimulus_set, dnntest_stimulus_set.name, "brainio-brainscore" - ) - dnntrain_stimulus_meta = package_stimulus_set( - None, dnntrain_stimulus_set, dnntrain_stimulus_set.name, "brainio-brainscore" - ) - - # Load human data assembly - data_file = os.path.join(ROOT_DIRECTORY, "data/human_data_andrea.csv") - data_assembly = load_behavioral_data(data_file, TAG) - assembly_meta = package_data_assembly( - None, - data_assembly, - data_assembly.name, - human_stimulus_set.name, - "BehavioralAssembly", - "brainio-brainscore", - ) - - # print(human_stimulus_meta) - # print(dnntest_stimulus_meta) - # print(dnntrain_stimulus_meta) - # print(assembly_meta) - - -if __name__ == "__main__": - main() diff --git a/brainscore_vision/data/maniquet2024/test.py b/brainscore_vision/data/maniquet2024/test.py deleted file mode 100644 index 85469c40a..000000000 --- a/brainscore_vision/data/maniquet2024/test.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Thu Jun 20 17:32:25 2024 - -@author: costantino_ai -""" -import pytest -from brainscore_vision import load_dataset, load_stimulus_set - -@pytest.mark.private_access -def test_existence(): - assert load_stimulus_set('Maniquet2024') is not None - assert load_dataset('Maniquet2024') is not None - - \ No newline at end of file diff --git a/brainscore_vision/metrics/maniquet2024_metrics/__init__.py b/brainscore_vision/metrics/maniquet2024_metrics/__init__.py deleted file mode 100644 index 19f9e2ea8..000000000 --- a/brainscore_vision/metrics/maniquet2024_metrics/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from brainscore_vision import metric_registry -from .metric import ConfusionSimilarity, TasksConsistency - -BIBTEX = """@article {Maniquet2024.04.02.587669, - author = {Maniquet, Tim and de Beeck, Hans Op and Costantino, Andrea Ivan}, - title = {Recurrent issues with deep neural network models of visual recognition}, - elocation-id = {2024.04.02.587669}, - year = {2024}, - doi = {10.1101/2024.04.02.587669}, - publisher = {Cold Spring Harbor Laboratory}, - URL = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669}, - eprint = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669.full.pdf}, - journal = {bioRxiv} -}""" - -metric_registry['confusion_similarity'] = ConfusionSimilarity -metric_registry['tasks_consistency'] = TasksConsistency - - diff --git a/brainscore_vision/metrics/maniquet2024_metrics/metric.py b/brainscore_vision/metrics/maniquet2024_metrics/metric.py deleted file mode 100644 index 97a57733a..000000000 --- a/brainscore_vision/metrics/maniquet2024_metrics/metric.py +++ /dev/null @@ -1,443 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Sun Jun 23 00:44:19 2024 - -@author: 
costantino_ai -""" -import itertools -import numpy as np -from sklearn.metrics import confusion_matrix -from scipy.stats import pearsonr -from brainscore_core.metrics import Metric, Score -from brainio.assemblies import BehavioralAssembly - - -class ConfusionSimilarity(Metric): - """ - A metric to compute the similarity between model-generated confusion matrices and human confusion data. - - Methods: - _extract_subjects(assembly): Extracts and sorts unique subject identifiers from the assembly. - _rollout_matrix(matrix, remove_diagonal=True): Flattens a matrix into a vector, optionally removing diagonal elements. - _label_from_probability(probabilities): Derives predicted labels from probabilities. - _accuracy(y_true, y_pred): Calculates the accuracy of predictions. - _ceiling(assembly, precomputed=True): Computes the ceiling performance by assessing the highest correlation across subjects. - __call__(probabilities, human_assembly): Computes the correlation between model and human confusion matrices normalized by the ceiling. - """ - - def _extract_subjects(self, assembly): - """ - Extracts and sorts unique subject identifiers from the assembly. - - Args: - assembly (xarray.Dataset): The data assembly containing subject IDs. - - Returns: - list: Sorted list of unique subject IDs. - """ - return list(sorted(set(assembly["subject_id"].values))) - - def _rollout_matrix(self, matrix, remove_diagonal=True): - """ - Flattens a matrix into a vector. Optionally removes diagonal elements to ignore self-comparison. - - Args: - matrix (np.array): A square matrix. - remove_diagonal (bool): Whether to remove the diagonal elements of the matrix. - - Returns: - np.array: The flattened matrix as a vector. - """ - if remove_diagonal: - # Create a mask to remove diagonal elements from the matrix. - mask = np.eye(matrix.shape[0], dtype=bool) - return matrix[~mask].ravel() - else: - return matrix.ravel() - - def _label_from_probability(self, probabilities): - """ - Derives predicted labels from probabilities by selecting the class with the highest probability. - - Args: - probabilities (xarray.Dataset): Dataset containing class probabilities. - - Returns: - tuple: Arrays of true labels and predicted labels. - """ - # Extract the class with the highest probability for each instance. - classes = probabilities.choice.values - indices = np.argmax(probabilities.values, axis=1) - y_pred = classes[indices] - y_true = probabilities.image_label.values - return y_true, y_pred - - def _accuracy(self, y_true, y_pred): - """ - Calculates the accuracy of predictions. - - Args: - y_true (np.array): True labels. - y_pred (np.array): Predicted labels. - - Returns: - float: The accuracy of the predictions. - """ - return sum(y_true == y_pred) / len(y_pred) - - def _ceiling(self, assembly, precomputed=True): - """ - Compute the noise ceiling of a confusion matrix using split-half correlations. - - Args: - assembly: (Human) Assembly with expected columns 'predicted'and 'image_label'. - precomputed (Bool): If true, use precomputed ceiling measure to save time. - - Returns: - score (float): Noise ceiling average. - """ - if precomputed: - # This is to save quite a lot of time. It was precomputed on the Maniquet2024 - # human data assembly, which includes 218 participants tested on the - # Maniquet2024 stimulus set. 
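-            # (Descriptive note: 0.54007 below is the stored split-half reliability for that
-            # 218-participant assembly; calling _ceiling with precomputed=False would
-            # re-estimate it by correlating per-subject half-split confusion matrices.)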
- return 0.54007 - - # Get labels and subjects lists - labels = list(set(assembly.image_label.values)) - subjects = self._extract_subjects(assembly) - - # Start recording correlation scores - correlation_scores = [] - for subject in subjects: - - # Select data from a single subject - subj_df = assembly.sel(subject_id=subject) - - # Split it in two randomly - n_rows = int(np.round(len(subj_df) / 2)) - half = np.random.randint(0, len(subj_df), size=n_rows) - part_one, part_two = subj_df[half], subj_df[~half] - - # Compute confusion matrix for each half - cm_one = confusion_matrix( - y_true=part_one["image_label"], - y_pred=part_one["prediction"], - labels=labels, - ) - cm_two = confusion_matrix( - y_true=part_two["image_label"], - y_pred=part_two["prediction"], - labels=labels, - ) - - # Compute Pearson correlation between the two confusion matrices. - correlation_score = pearsonr( - self._rollout_matrix(cm_one), - self._rollout_matrix(cm_two), - )[0] - correlation_scores.append(correlation_score) - - # Average correlations as a measure of reliability - ceiling = np.mean(correlation_scores) - - return ceiling - - def __call__( - self, probabilities: BehavioralAssembly, human_assembly: BehavioralAssembly - ) -> Score: - """ - Computes the correlation between model and human confusion matrices normalized by the ceiling. - - Args: - probabilities (BehavioralAssembly): Model's predicted probabilities. - human_assembly (BehavioralAssembly): Human baseline responses. - - Returns: - Score: The normalized correlation score as a performance metric. - """ - assert sorted(set(probabilities.choice.values)) == sorted( - set(human_assembly.image_label.values) - ) - - # Extract labels from the model probabilities. - y_true, y_pred = self._label_from_probability(probabilities) - - # Calculate the model's confusion matrix. - dnn_confmat = confusion_matrix( - y_true=y_true, y_pred=y_pred, labels=probabilities.choice.values - ) - - # Calculate the human confusion matrix. - human_confmat = confusion_matrix( - y_true=human_assembly["image_label"], - y_pred=human_assembly["prediction"], - labels=probabilities.choice.values, - ) - - # Compute the Pearson correlation between the model and human confusion matrices. - correlation_score = pearsonr( - self._rollout_matrix(human_confmat), self._rollout_matrix(dnn_confmat) - )[0] - ceiling = self._ceiling(human_assembly, precomputed=True) - - # Normalize by ceiling - score = Score(correlation_score / ceiling) - score.attrs["raw"] = correlation_score - score.attrs["ceiling"] = ceiling - - return score - - -class TasksConsistency(Metric): - """ - A metric to compute the consistency between model and human accuracy profiles across different tasks. - - Methods: - _extract_subjects(assembly): Extracts and sorts unique subject identifiers from the assembly. - _extract_tasks(assembly): Extracts and sorts unique task identifiers from the assembly. - _rollout_matrix(matrix, remove_diagonal=True): Flattens a matrix into a vector, optionally removing diagonal elements. - _label_from_probability(probabilities): Derives predicted labels from probabilities. - _accuracy(y_true, y_pred): Calculates the accuracy of predictions. - _ceiling(assembly, precomputed=True): Computes the ceiling performance by assessing the highest correlation across subjects. - _map_human_to_dnn_categories(human_task): Maps a human task name to the corresponding DNN categories of 'manipulation' and 'manipulation_details'. 
- __call__(probabilities, human_assembly): Computes the correlation between model and human confusion matrices normalized by the ceiling. - """ - - def _extract_subjects(self, assembly): - """ - Extracts and sorts unique subject identifiers from the assembly. - - Args: - assembly (xarray.Dataset): The data assembly containing subject IDs. - - Returns: - list: Sorted list of unique subject IDs. - """ - return list(sorted(set(assembly["subject_id"].values))) - - def _extract_tasks(self, assembly): - """ - Extracts and sorts unique task identifiers from the assembly. - - Args: - assembly (xarray.Dataset): The data assembly containing task IDs. - - Returns: - list: Sorted list of unique task IDs. - """ - return list(sorted(set(assembly["task"].values))) - - def _rollout_matrix(self, matrix, remove_diagonal=True): - """ - Flattens a matrix into a vector. Optionally removes diagonal elements to ignore self-comparison. - - Args: - matrix (np.array): A square matrix. - remove_diagonal (bool): Whether to remove the diagonal elements of the matrix. - - Returns: - np.array: The flattened matrix as a vector. - """ - if remove_diagonal: - # Create a mask to remove diagonal elements from the matrix. - mask = np.eye(matrix.shape[0], dtype=bool) - return matrix[~mask].ravel() - else: - return matrix.ravel() - - def _label_from_probability(self, probabilities): - """ - Derives predicted labels from probabilities by selecting the class with the highest probability. - - Args: - probabilities (xarray.Dataset): Dataset containing class probabilities. - - Returns: - tuple: Arrays of true labels and predicted labels. - """ - # Extract the class with the highest probability for each instance. - classes = probabilities.choice.values - indices = np.argmax(probabilities.values, axis=1) - y_pred = classes[indices] - y_true = probabilities.image_label.values - return y_true, y_pred - - def _accuracy(self, y_true, y_pred): - """ - Calculates the accuracy of predictions. - - Args: - y_true (np.array): True labels. - y_pred (np.array): Predicted labels. - - Returns: - float: The accuracy of the predictions. - """ - return sum(y_true == y_pred) / len(y_pred) - - def _ceiling(self, assembly, precomputed=True): - """ - Computes the ceiling performance by assessing the average split-half correlation across subjects. - - Args: - assembly (xarray.Dataset): The data assembly containing subject data. - precomputed (bool): Whether to use precomputed ceiling value. - - Returns: - Score: The average correlation score across all subject pairs. - """ - if precomputed: - # This precomputed value is based on the Maniquet2024 human data assembly, - # which includes 218 participants tested on the Maniquet2024 stimulus set. 
- return 0.99810 - - # Initialize an empty list to store correlations for each iteration - iter_task_correlations = [] - - # Perform 50 iterations for split-half correlation - for i in range(50): - - # Randomly split the data assembly into two halves - n_rows = int(np.round(len(assembly) / 2)) - half = np.random.randint(0, len(assembly), size=n_rows) - part_one, part_two = assembly[half], assembly[~half] - - # Extract performance vectors for each half across all tasks - perf_vec_one = [ - float(np.mean(part_one[part_one["task"] == task])) - for task in self.human_tasks - ] - perf_vec_two = [ - float(np.mean(part_two.loc[part_two["task"] == task])) - for task in self.human_tasks - ] - - # Calculate the Pearson correlation between the performance vectors of the two halves - corr_perf = pearsonr(perf_vec_one, perf_vec_two)[0] - - # Append the correlation result to the list for this iteration - iter_task_correlations.append(corr_perf) - - return np.mean(iter_task_correlations) - - def _map_human_to_dnn_categories(self, human_task): - """ - Maps a human task name to the corresponding DNN categories of 'manipulation' and 'manipulation_details'. - - Args: - human_task (str): A task name from the human tasks list. - - Returns: - tuple: A tuple where the first element is the 'manipulation' and the second is 'manipulation_details'. - """ - # Mapping based on the provided details - manipulation_mapping = { - "clutter": "clutter", - "control": "control", - "occlusion": "occluder", - "scrambling": "phasescrambling", - } - - detail_mapping = { - "heavy": "heavy", - "light": "light", - "highpass": "highpass", - "lowpass": "lowpass", - "few_large_blobs_high": "fewlarge-high", - "few_large_blobs_low": "fewlarge-low", - "few_large_deletion_high": "fewlarge-high", - "few_large_deletion_low": "fewlarge-low", - "many_small_blobs_high": "manysmall-high", - "many_small_blobs_low": "manysmall-low", - "many_small_deletion_high": "manysmall-high", - "many_small_deletion_low": "manysmall-low", - "few_large_partial_viewing_high": "fewlarge-high", - "few_large_partial_viewing_low": "fewlarge-low", - "many_small_partial_viewing_high": "manysmall-high", - "many_small_partial_viewing_low": "manysmall-low", - } - - parts = human_task.split("_") - if "control" in parts: - # Handle control separately as it doesn't fit other patterns - return ("control", "control") - - # Determine manipulation by first relevant keyword - manipulation = next( - (manipulation_mapping[key] for key in manipulation_mapping if key in parts), - None, - ) - - # Construct a detail key from remaining parts excluding known manipulation keys - detail_parts = [part for part in parts if part not in manipulation_mapping] - detail_key = "_".join(detail_parts) - - # Find the matching manipulation detail - manipulation_detail = detail_mapping.get( - detail_key, "control" - ) # Default to control if no match found - - return (manipulation, manipulation_detail) - - def __call__( - self, probabilities: BehavioralAssembly, human_assembly: BehavioralAssembly - ) -> Score: - """ - Computes the correlation between model and human accuracy profiles across tasks, normalized by the ceiling. - - Args: - probabilities (BehavioralAssembly): Model's predicted probabilities. - human_assembly (BehavioralAssembly): Human baseline responses. - - Returns: - Score: The normalized correlation score as a performance metric. 
- """ - assert sorted(set(probabilities.choice.values)) == sorted( - set(human_assembly.image_label.values) - ) - - # Get list of tasks - self.human_tasks = self._extract_tasks(human_assembly) - - # Store accuracies - dnn_accs = [] - human_accs = [] - - # Calculate the model's accuracy across tasks. - for human_task in self.human_tasks: - # Convert the human task into DNN lingo - manipulation, manipulation_details = self._map_human_to_dnn_categories( - human_task - ) - - # Extract labels from the model probabilities. - probabilities_filtered = probabilities[ - (probabilities["manipulation"] == manipulation) - & (probabilities["manipulation_details"] == manipulation_details) - ] - - dnn_y_true, dnn_y_pred = self._label_from_probability(probabilities_filtered) - dnn_acc = self._accuracy(dnn_y_true, dnn_y_pred) - dnn_accs.append(dnn_acc) - - # Extract labels from the human responses. - human_responses_filtered = human_assembly[ - human_assembly["task"] == human_task - ] - human_acc = self._accuracy( - human_responses_filtered["image_label"], - human_responses_filtered["prediction"], - ) - human_accs.append(human_acc) - - # Compute the Pearson correlation between the model and human accuracy profiles. - correlation_score = pearsonr(dnn_accs, human_accs)[0] - ceiling = self._ceiling(human_assembly, precomputed=True) - - # Normalize by ceiling - score = Score(correlation_score / ceiling) - score.attrs["raw"] = correlation_score - score.attrs["ceiling"] = ceiling - - return score diff --git a/brainscore_vision/metrics/maniquet2024_metrics/requirements.txt b/brainscore_vision/metrics/maniquet2024_metrics/requirements.txt deleted file mode 100644 index 595f57d71..000000000 --- a/brainscore_vision/metrics/maniquet2024_metrics/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -numpy -sklearn -scipy \ No newline at end of file diff --git a/brainscore_vision/metrics/maniquet2024_metrics/test.py b/brainscore_vision/metrics/maniquet2024_metrics/test.py deleted file mode 100644 index 496156fc7..000000000 --- a/brainscore_vision/metrics/maniquet2024_metrics/test.py +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Sun Jun 23 13:40:53 2024 - -@author: costantino_ai -""" - From 5a00db08c3e63351d0f031e3b483da475938f597 Mon Sep 17 00:00:00 2001 From: Martin Schrimpf Date: Sun, 7 Jul 2024 12:41:39 +0200 Subject: [PATCH 48/68] flag benchmark scoring test as private for Malania2007 (#1013) --- brainscore_vision/benchmarks/malania2007/test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/brainscore_vision/benchmarks/malania2007/test.py b/brainscore_vision/benchmarks/malania2007/test.py index 8a8ca75cd..af35e131f 100644 --- a/brainscore_vision/benchmarks/malania2007/test.py +++ b/brainscore_vision/benchmarks/malania2007/test.py @@ -43,6 +43,7 @@ def test_dataset_ceiling(self, dataset, expected_ceiling): ceiling = benchmark.ceiling assert ceiling == expected_ceiling + @pytest.mark.private_access @pytest.mark.parametrize('dataset, expected_score', [ ('short2-threshold_elevation', approx(0.0, abs=0.001)), ('short4-threshold_elevation', approx(0.0, abs=0.001)), From 54a2330aade7cab868544c6ae6fba8650810374b Mon Sep 17 00:00:00 2001 From: Martin Schrimpf Date: Mon, 8 Jul 2024 16:22:04 +0200 Subject: [PATCH 49/68] add description for identifier conventions (#1009) --- docs/source/modules/benchmark_tutorial.rst | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/docs/source/modules/benchmark_tutorial.rst b/docs/source/modules/benchmark_tutorial.rst index 
454f3d246..673fc3740 100644 --- a/docs/source/modules/benchmark_tutorial.rst +++ b/docs/source/modules/benchmark_tutorial.rst @@ -384,3 +384,21 @@ If any stimuli or data should be made public, please let us know so that we can policy. After the PR has been merged, the submission system will automatically run all existing models on the new benchmark. + + +Naming conventions +================== +**Identifiers**: + +* Benchmark: At the top level, benchmark identifiers should combine data and metric identifiers, + separated by a dash: ``{data}-{metric}``. + +* Data: Identifiers for datasets vary depending on the community but should ideally not include dashes + (which are used in the benchmark identifier, see above). + For brain and cognitive science datasets, data identifiers often point to the paper or report + where the data was first introduced (e.g. ``MajajHong2015`` or ``Sanghavi2020``). + When using components of datasets, we recommend separating those with a dot, + and to use an underscore between multiple words (e.g. ``MajajHong2015.IT`` or ``Malania2007.vernier_only``). + For machine learning ("engineering") datasets, data identifiers are often descriptive (e.g. ``ImageNet``). + +* Metric: Identifiers for metrics are typically descriptive (e.g. ``rdm``, ``pls``, ``accuracy``). From 3c81b37e772c226724c7baca7694d86c34ccb589 Mon Sep 17 00:00:00 2001 From: Martin Schrimpf Date: Tue, 9 Jul 2024 10:11:59 +0200 Subject: [PATCH 50/68] set visual degrees for BMD2024 benchmark (#1017) 8 degrees "is also what we calibrated stimulus presentation for participants to be in the actual experiments" --- brainscore_vision/benchmarks/bmd2024/benchmark.py | 1 + 1 file changed, 1 insertion(+) diff --git a/brainscore_vision/benchmarks/bmd2024/benchmark.py b/brainscore_vision/benchmarks/bmd2024/benchmark.py index 93009304a..68ee65146 100644 --- a/brainscore_vision/benchmarks/bmd2024/benchmark.py +++ b/brainscore_vision/benchmarks/bmd2024/benchmark.py @@ -14,6 +14,7 @@ class _BMD_2024_BehavioralAccuracyDistance(BenchmarkBase): # behavioral benchmark def __init__(self, dataset): + self._visual_degrees = 8 self._metric = load_metric('accuracy_distance') self._assembly = LazyLoad(lambda: load_assembly(dataset)) super(_BMD_2024_BehavioralAccuracyDistance, self).__init__( From 3aaf121b79062ed18e1cca6c97e22358742a34c3 Mon Sep 17 00:00:00 2001 From: Michael Ferguson Date: Tue, 9 Jul 2024 04:18:16 -0400 Subject: [PATCH 51/68] remove uneeded requirements.txt (#1015) --- brainscore_vision/models/hmax/requirements.txt | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/brainscore_vision/models/hmax/requirements.txt b/brainscore_vision/models/hmax/requirements.txt index 94fbf10ef..c2afe6a3a 100644 --- a/brainscore_vision/models/hmax/requirements.txt +++ b/brainscore_vision/models/hmax/requirements.txt @@ -1,8 +1,6 @@ torchvision torch numpy -os scipy logging -PIL -collections \ No newline at end of file +pillow \ No newline at end of file From 42c31574ae69eae1792949591cec8427b52581af Mon Sep 17 00:00:00 2001 From: Martin Schrimpf Date: Tue, 9 Jul 2024 14:05:02 +0200 Subject: [PATCH 52/68] fix identifiers (previously duplicate) and score object (#1020) --- .../benchmarks/baker2022/__init__.py | 6 +-- .../benchmarks/baker2022/benchmark.py | 33 ++++++------ .../benchmarks/baker2022/test.py | 50 ++++++++++--------- .../metrics/baker_accuracy_delta/metric.py | 23 ++++----- .../metrics/baker_accuracy_delta/test.py | 2 +- 5 files changed, 58 insertions(+), 56 deletions(-) diff --git 
a/brainscore_vision/benchmarks/baker2022/__init__.py b/brainscore_vision/benchmarks/baker2022/__init__.py index 5bbad98d1..827c51d08 100644 --- a/brainscore_vision/benchmarks/baker2022/__init__.py +++ b/brainscore_vision/benchmarks/baker2022/__init__.py @@ -4,6 +4,6 @@ DATASETS = ['normal', 'inverted'] -benchmark_registry['Baker2022-accuracy_delta_frankenstein'] = lambda: Baker2022AccuracyDeltaFrankenstein() -benchmark_registry['Baker2022-accuracy_delta_fragmented'] = lambda: Baker2022AccuracyDeltaFragmented() -benchmark_registry['Baker2022-inverted_accuracy_delta'] = lambda: Baker2022InvertedAccuracyDelta() +benchmark_registry['Baker2022frankenstein-accuracy_delta'] = lambda: Baker2022AccuracyDeltaFrankenstein() +benchmark_registry['Baker2022fragmented-accuracy_delta'] = lambda: Baker2022AccuracyDeltaFragmented() +benchmark_registry['Baker2022inverted-accuracy_delta'] = lambda: Baker2022InvertedAccuracyDelta() diff --git a/brainscore_vision/benchmarks/baker2022/benchmark.py b/brainscore_vision/benchmarks/baker2022/benchmark.py index 0473cc5a9..baedd2be5 100644 --- a/brainscore_vision/benchmarks/baker2022/benchmark.py +++ b/brainscore_vision/benchmarks/baker2022/benchmark.py @@ -1,14 +1,14 @@ import numpy as np -import numpy.random + +from brainscore_vision.benchmark_helpers import bound_score from brainscore_vision.benchmarks import BenchmarkBase from brainscore_vision.benchmark_helpers.screen import place_on_screen from brainscore_vision.metrics import Score -from brainscore_vision.metrics.baker_accuracy_delta.metric import BakerAccuracyDelta, compute_ceiling from brainscore_vision.model_interface import BrainModel from brainscore_vision.utils import LazyLoad from typing import List from brainio.assemblies import DataAssembly -from brainscore_vision import load_dataset +from brainscore_vision import load_dataset, load_metric BIBTEX = """@article{BAKER2022104913, title = {Deep learning models fail to capture the configural nature of human shape perception}, @@ -30,22 +30,24 @@ class _Baker2022AccuracyDelta(BenchmarkBase): - def __init__(self, dataset: str, image_types: List[str]): + def __init__(self, identifier_suffix: str, dataset: str, image_types: List[str]): """ + :param identifier_suffix: how to uniquely identify this combination of dataset and image types :param dataset: orientation of stimuli. 
Either 'normal' or 'inverted' :param image_types: Either ["w", "f"] for frankenstein delta or ["w", "o"] for fragmented delta """ - self._metric = BakerAccuracyDelta(image_types=image_types) + self._metric = load_metric('baker_accuracy_delta', image_types=image_types) self.image_types = image_types self.orientation = dataset - self._ceiling = SplitHalvesConsistencyBaker(num_splits=100, split_coordinate="subject", + self._ceiling = SplitHalvesConsistencyBaker(metric=self._metric, + num_splits=100, split_coordinate="subject", image_types=self.image_types) self._assembly = LazyLoad(lambda: load_assembly(dataset)) self._visual_degrees = 8.8 self._number_of_trials = 1 super(_Baker2022AccuracyDelta, self).__init__( - identifier=f'Baker2022{dataset}-accuracy_delta', version=1, + identifier=f'Baker2022{identifier_suffix}-accuracy_delta', version=1, ceiling_func=lambda: self._ceiling(assembly=self._assembly), parent='Baker2022', bibtex=BIBTEX) @@ -63,25 +65,22 @@ def __call__(self, candidate: BrainModel): ceiling = self._ceiling(assembly) score = raw_score / ceiling - # cap score at 1 if ceiled score > 1 - if score[(score['aggregation'] == 'center')] > 1: - score.__setitem__({'aggregation': score['aggregation'] == 'center'}, 1) - + bound_score(score) # cap score at 1 if ceiled score > 1 score.attrs['raw'] = raw_score score.attrs['ceiling'] = ceiling return score def Baker2022AccuracyDeltaFrankenstein(): - return _Baker2022AccuracyDelta(dataset='normal', image_types=["w", "f"]) + return _Baker2022AccuracyDelta(identifier_suffix='frankenstein', dataset='normal', image_types=["w", "f"]) def Baker2022AccuracyDeltaFragmented(): - return _Baker2022AccuracyDelta(dataset='normal', image_types=["w", "o"]) + return _Baker2022AccuracyDelta(identifier_suffix='fragmented', dataset='normal', image_types=["w", "o"]) def Baker2022InvertedAccuracyDelta(): - return _Baker2022AccuracyDelta(dataset='inverted', image_types=["w", "f"]) + return _Baker2022AccuracyDelta(identifier_suffix='inverted', dataset='inverted', image_types=["w", "f"]) def load_assembly(dataset): @@ -91,11 +90,13 @@ def load_assembly(dataset): # ceiling method: class SplitHalvesConsistencyBaker: - def __init__(self, num_splits: int, split_coordinate: str, image_types): + def __init__(self, metric, num_splits: int, split_coordinate: str, image_types): """ + :param metric: the metric used to estimate a ceiling :param num_splits: how many times to create two halves :param split_coordinate: over which coordinate to split the assembly into halves """ + self._metric = metric self.num_splits = num_splits self.split_coordinate = split_coordinate self.image_types = image_types @@ -111,7 +112,7 @@ def __call__(self, assembly: DataAssembly) -> Score: {'presentation': [subject in half1_subjects for subject in assembly['subject'].values]}] half2 = assembly[ {'presentation': [subject not in half1_subjects for subject in assembly['subject'].values]}] - consistency = compute_ceiling(half1, half2, self.image_types) + consistency = self._metric.compute_ceiling(half1, half2, self.image_types) uncorrected_consistencies.append(consistency) # Spearman-Brown correction for sub-sampling corrected_consistency = 2 * consistency / (1 + (2 - 1) * consistency) diff --git a/brainscore_vision/benchmarks/baker2022/test.py b/brainscore_vision/benchmarks/baker2022/test.py index 6b58d06c2..8142dd603 100644 --- a/brainscore_vision/benchmarks/baker2022/test.py +++ b/brainscore_vision/benchmarks/baker2022/test.py @@ -16,19 +16,21 @@ def test_count(self): assert len(DATASETS) == 2 # 
ensure the three benchmarks themselves are there - @pytest.mark.parametrize('benchmark', [ - 'Baker2022-inverted_accuracy_delta', - 'Baker2022-accuracy_delta_fragmented', - 'Baker2022-inverted_accuracy_delta' + @pytest.mark.parametrize('identifier', [ + 'Baker2022inverted-accuracy_delta', + 'Baker2022fragmented-accuracy_delta', + 'Baker2022frankenstein-accuracy_delta', ]) - def test_in_pool(self, benchmark): - assert benchmark in benchmark_registry + def test_identifier(self, identifier): + assert identifier in benchmark_registry + benchmark = load_benchmark(identifier=identifier) + assert benchmark.identifier == identifier # Test expected ceiling @pytest.mark.parametrize('benchmark, expected_ceiling', [ - ('Baker2022-accuracy_delta_frankenstein', 0.8498), - ('Baker2022-accuracy_delta_fragmented', 0.9385), - ('Baker2022-inverted_accuracy_delta', 0.6538), + ('Baker2022frankenstein-accuracy_delta', 0.8498), + ('Baker2022fragmented-accuracy_delta', 0.9385), + ('Baker2022inverted-accuracy_delta', 0.6538), ]) def test_benchmark_ceiling(self, benchmark, expected_ceiling): benchmark = load_benchmark(benchmark) @@ -42,12 +44,12 @@ def test_benchmark_ceiling(self, benchmark, expected_ceiling): # Test raw scores @pytest.mark.parametrize('benchmark, model, expected_raw_score', [ - ('Baker2022-accuracy_delta_frankenstein', 'resnet-50-pytorch', approx(0.2847, abs=0.0001)), - ('Baker2022-accuracy_delta_fragmented', 'resnet-50-pytorch', approx(0.8452, abs=0.0001)), - ('Baker2022-inverted_accuracy_delta', 'resnet-50-pytorch', approx(0.0, abs=0.0001)), - ('Baker2022-accuracy_delta_frankenstein', 'resnet50-SIN', approx(0.6823, abs=0.0001)), - ('Baker2022-accuracy_delta_fragmented', 'resnet50-SIN', approx(0.9100, abs=0.0001)), - ('Baker2022-inverted_accuracy_delta', 'resnet50-SIN', approx(0.7050, abs=0.0001)), + ('Baker2022frankenstein-accuracy_delta', 'resnet-50-pytorch', approx(0.2847, abs=0.0001)), + ('Baker2022fragmented-accuracy_delta', 'resnet-50-pytorch', approx(0.8452, abs=0.0001)), + ('Baker2022inverted-accuracy_delta', 'resnet-50-pytorch', approx(0.0, abs=0.0001)), + ('Baker2022frankenstein-accuracy_delta', 'resnet50-SIN', approx(0.6823, abs=0.0001)), + ('Baker2022fragmented-accuracy_delta', 'resnet50-SIN', approx(0.9100, abs=0.0001)), + ('Baker2022inverted-accuracy_delta', 'resnet50-SIN', approx(0.7050, abs=0.0001)), ]) def test_model_raw_score(self, benchmark, model, expected_raw_score): @@ -63,17 +65,17 @@ def test_model_raw_score(self, benchmark, model, expected_raw_score): raw_score = score.raw # division by ceiling <= 1 should result in higher score - assert score.sel(aggregation='center') >= raw_score.sel(aggregation='center') - assert raw_score.sel(aggregation='center') == expected_raw_score + assert score >= raw_score + assert raw_score == expected_raw_score # test ceiled score @pytest.mark.parametrize('benchmark, model, expected_ceiled_score', [ - ('Baker2022-accuracy_delta_frankenstein', 'resnet-50-pytorch', approx(0.3350, abs=0.0001)), - ('Baker2022-accuracy_delta_fragmented', 'resnet-50-pytorch', approx(0.9005, abs=0.0001)), - ('Baker2022-inverted_accuracy_delta', 'resnet-50-pytorch', approx(0.0, abs=0.0001)), - ('Baker2022-accuracy_delta_frankenstein', 'resnet50-SIN', approx(0.8029, abs=0.0001)), - ('Baker2022-accuracy_delta_fragmented', 'resnet50-SIN', approx(0.9696, abs=0.0001)), - ('Baker2022-inverted_accuracy_delta', 'resnet50-SIN', approx(1.000, abs=0.0001)), + ('Baker2022frankenstein-accuracy_delta', 'resnet-50-pytorch', approx(0.3350, abs=0.0001)), + 
('Baker2022fragmented-accuracy_delta', 'resnet-50-pytorch', approx(0.9005, abs=0.0001)), + ('Baker2022inverted-accuracy_delta', 'resnet-50-pytorch', approx(0.0, abs=0.0001)), + ('Baker2022frankenstein-accuracy_delta', 'resnet50-SIN', approx(0.8029, abs=0.0001)), + ('Baker2022fragmented-accuracy_delta', 'resnet50-SIN', approx(0.9696, abs=0.0001)), + ('Baker2022inverted-accuracy_delta', 'resnet50-SIN', approx(1.000, abs=0.0001)), ]) def test_model_ceiled_score(self, benchmark, model, expected_ceiled_score): benchmark_object = load_benchmark(benchmark) @@ -85,4 +87,4 @@ def test_model_ceiled_score(self, benchmark, model, expected_ceiled_score): precomputed_features = BehavioralAssembly.from_files(file_path=precomputed_features) precomputed_features = PrecomputedFeatures(precomputed_features, visual_degrees=8) score = benchmark_object(precomputed_features) - assert score.sel(aggregation='center') == expected_ceiled_score \ No newline at end of file + assert score == expected_ceiled_score diff --git a/brainscore_vision/metrics/baker_accuracy_delta/metric.py b/brainscore_vision/metrics/baker_accuracy_delta/metric.py index 7f460010f..271e1537f 100644 --- a/brainscore_vision/metrics/baker_accuracy_delta/metric.py +++ b/brainscore_vision/metrics/baker_accuracy_delta/metric.py @@ -1,9 +1,9 @@ +from typing import List + import numpy as np -from brainscore_core import Metric, Score -from typing import List from brainio.assemblies import BehavioralAssembly - +from brainscore_core import Metric, Score # controls how many half-splits are averaged together to get human delta. HUMAN_SPLITS = 100 @@ -19,7 +19,6 @@ def __call__(self, source: BehavioralAssembly, target: BehavioralAssembly): # calculate score over average of 100 sub splits of human delta for i in range(HUMAN_SPLITS): - # grab one half of the subjeects random_state = np.random.RandomState(i) num_subjects = len(set(target["subject"].values)) @@ -32,10 +31,17 @@ def __call__(self, source: BehavioralAssembly, target: BehavioralAssembly): score = np.mean(scores) error = np.std(scores) - score = Score([score, error], coords={'aggregation': ['center', 'error']}, dims=('aggregation',)) + score = Score(score) + score.attrs['error'] = error score.attrs['raw'] = scores return score + def compute_ceiling(self, source: BehavioralAssembly, target: BehavioralAssembly, image_types: List[str]) -> float: + half_1_delta = get_human_delta(target=source, image_types=image_types) + half_2_delta = get_human_delta(target=target, image_types=image_types) + ceiling = max((1 - ((np.abs(half_1_delta - half_2_delta)) / half_2_delta)), 0) + return ceiling + def extract_subjects(assembly): return list(sorted(set(assembly['subject'].values))) @@ -86,10 +92,3 @@ def get_model_delta(source, image_types): # return difference between whole and condition model_delta = condition_scores_model[0] - condition_scores_model[1] return model_delta - - -def compute_ceiling(source: BehavioralAssembly, target: BehavioralAssembly, image_types: List[str]) -> float: - half_1_delta = get_human_delta(target=source, image_types=image_types) - half_2_delta = get_human_delta(target=target, image_types=image_types) - ceiling = max((1 - ((np.abs(half_1_delta - half_2_delta)) / half_2_delta)), 0) - return ceiling diff --git a/brainscore_vision/metrics/baker_accuracy_delta/test.py b/brainscore_vision/metrics/baker_accuracy_delta/test.py index b100d2ec1..948e7cd0a 100644 --- a/brainscore_vision/metrics/baker_accuracy_delta/test.py +++ b/brainscore_vision/metrics/baker_accuracy_delta/test.py @@ -1 +1 @@ 
-# tests apart of /benchmarks/baker2022/test.py, omitted here +# tests part of /benchmarks/baker2022/test.py, omitted here From 11e12720a2bd7e7123bd5a9e1f66c9ed2784b753 Mon Sep 17 00:00:00 2001 From: Martin Schrimpf Date: Tue, 9 Jul 2024 14:07:38 +0200 Subject: [PATCH 53/68] fix Travis build status (#1018) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1a6d92574..eae4d140f 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![Build Status](https://travis-ci.com/brain-score/vision.svg?token=vqt7d2yhhpLGwHsiTZvT&branch=master)](https://travis-ci.com/brain-score/vision) +[![Build Status](https://app.travis-ci.com/brain-score/vision.svg?token=vqt7d2yhhpLGwHsiTZvT&branch=master)](https://app.travis-ci.com/brain-score/vision) [![Documentation Status](https://readthedocs.org/projects/brain-score/badge/?version=latest)](https://brain-score.readthedocs.io/en/latest/?badge=latest) [![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](code_of_conduct.md) From d203aa449a6e1dba338c833d26d25132af1341f0 Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Tue, 9 Jul 2024 08:10:07 -0400 Subject: [PATCH 54/68] Add voneresnet-50-non_stochastic (#959) * Add voneresnet-50-non_stochastic * fix imports * Replace setup.py with requirements.txt * Remove get_model_list * Update brainscore_vision/models/voneresnet-50-non_stochastic/model.py Co-authored-by: Martin Schrimpf * Remove redundant packages from requirements.txt * Add .git suffix * removing dash from package name * Add vonenet locally, try relative import * Fix S3 version * Bring deps from vonenet repo to model requirements --------- Co-authored-by: Ethan Pellegrini Co-authored-by: Martin Schrimpf Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> Co-authored-by: Deirdre Kelliher --- .../voneresnet_50_non_stochastic/__init__.py | 7 + .../voneresnet_50_non_stochastic/model.py | 104 +++ .../requirements.txt | 8 + .../voneresnet_50_non_stochastic/test.py | 8 + .../vonenet/LICENSE | 674 ++++++++++++++++++ .../vonenet/README.md | 105 +++ .../vonenet/run.py | 136 ++++ .../vonenet/setup.py | 41 ++ .../vonenet/train.py | 383 ++++++++++ .../vonenet/vonenet/__init__.py | 71 ++ .../vonenet/vonenet/back_ends.py | 337 +++++++++ .../vonenet/vonenet/modules.py | 126 ++++ .../vonenet/vonenet/params.py | 100 +++ .../vonenet/vonenet/utils.py | 32 + .../vonenet/vonenet/vonenet.py | 68 ++ .../vonenet_tutorial-activations.ipynb | 352 +++++++++ 16 files changed, 2552 insertions(+) create mode 100644 brainscore_vision/models/voneresnet_50_non_stochastic/__init__.py create mode 100644 brainscore_vision/models/voneresnet_50_non_stochastic/model.py create mode 100644 brainscore_vision/models/voneresnet_50_non_stochastic/requirements.txt create mode 100644 brainscore_vision/models/voneresnet_50_non_stochastic/test.py create mode 100644 brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/LICENSE create mode 100644 brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md create mode 100644 brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/run.py create mode 100644 brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/setup.py create mode 100644 brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/train.py create mode 100644 brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/__init__.py create mode 100644 
brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/back_ends.py create mode 100644 brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py create mode 100644 brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/params.py create mode 100644 brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/utils.py create mode 100644 brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/vonenet.py create mode 100644 brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet_tutorial-activations.ipynb diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/__init__.py b/brainscore_vision/models/voneresnet_50_non_stochastic/__init__.py new file mode 100644 index 000000000..fe60584ad --- /dev/null +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/__init__.py @@ -0,0 +1,7 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['voneresnet-50-non_stochastic'] = lambda: ModelCommitment(identifier='voneresnet-50-non_stochastic', + activations_model=get_model('voneresnet-50-non_stochastic'), + layers=get_layers('voneresnet-50-non_stochastic')) \ No newline at end of file diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/model.py b/brainscore_vision/models/voneresnet_50_non_stochastic/model.py new file mode 100644 index 000000000..8052fd887 --- /dev/null +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/model.py @@ -0,0 +1,104 @@ +import functools +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.check_submission import check_models +import torch +from torch.nn import Module +import torch.nn as nn +from brainscore_vision.model_helpers.s3 import load_weight_file +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +import ssl + +# needed import for globals() to work! 
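+# (descriptive note, assuming the dynamic lookup below is intentional:) `VOneNet` must be
+# importable under its plain name in this module because `get_model_from_s3` resolves the
+# constructor via `globals()[f'VOneNet']` rather than referencing it directly.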
+from .vonenet.vonenet.vonenet import VOneNet + +ssl._create_default_https_context = ssl._create_unverified_context + + +class Wrapper(Module): + def __init__(self, model): + super(Wrapper, self).__init__() + self.module = model + + +def get_model_from_s3(): + model_arch = 'resnet50' + pretrained = True + if pretrained and model_arch: + weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models", + relative_path="voneresnet-50-non_stochastic/voneresnet50_ns_e70.pth.tar", + version_id="vDk2cwi2xjwGqhGyyjp8lEGSfcaFzB61", + sha1="c270528818d6d7fc67a6aec86919d47311ad6221") + ckpt_data = torch.load(weights_path, map_location=torch.device('cpu')) + stride = ckpt_data['flags']['stride'] + simple_channels = ckpt_data['flags']['simple_channels'] + complex_channels = ckpt_data['flags']['complex_channels'] + k_exc = ckpt_data['flags']['k_exc'] + + noise_mode = ckpt_data['flags']['noise_mode'] + noise_scale = ckpt_data['flags']['noise_scale'] + noise_level = ckpt_data['flags']['noise_level'] + + model_id = ckpt_data['flags']['arch'].replace('_', '').lower() + + model = globals()[f'VOneNet'](model_arch=model_id, stride=stride, k_exc=k_exc, + simple_channels=simple_channels, complex_channels=complex_channels, + noise_mode=noise_mode, noise_scale=noise_scale, noise_level=noise_level) + + if model_arch.lower() == 'resnet50_at': + ckpt_data['state_dict'].pop('vone_block.div_u.weight') + ckpt_data['state_dict'].pop('vone_block.div_t.weight') + model.load_state_dict(ckpt_data['state_dict']) + else: + model = Wrapper(model) + model.load_state_dict(ckpt_data['state_dict']) + model = model.module + + model = nn.DataParallel(model) + else: + model = globals()[f'VOneNet'](model_arch=model_arch) + model = nn.DataParallel(model) + + model.to("cpu") + return model + +def get_model(name): + assert name == 'voneresnet-50-non_stochastic' + model = get_model_from_s3() + model = model.module + preprocessing = functools.partial(load_preprocess_images, image_size=224, + normalize_mean=(0.5, 0.5, 0.5), normalize_std=(0.5, 0.5, 0.5)) + wrapper = PytorchWrapper(identifier='vone' + 'resnet50_ns', model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + + +def get_layers(name): + assert name == 'voneresnet-50-non_stochastic' + layers = ( + ['vone_block'] + + ['model.layer1.0', 'model.layer1.1', 'model.layer1.2'] + + ['model.layer2.0', 'model.layer2.1', 'model.layer2.2', 'model.layer2.3'] + + ['model.layer3.0', 'model.layer3.1', 'model.layer3.2', 'model.layer3.3', + 'model.layer3.4', 'model.layer3.5'] + + ['model.layer4.0', 'model.layer4.1', 'model.layer4.2'] + + ['model.avgpool'] + ) + return layers + + +def get_bibtex(model_identifier): + return """@inproceedings{NEURIPS2020_98b17f06, + author = {Dapello, Joel and Marques, Tiago and Schrimpf, Martin and Geiger, Franziska and Cox, David and DiCarlo, James J}, + booktitle = {Advances in Neural Information Processing Systems}, + editor = {H. Larochelle and M. Ranzato and R. Hadsell and M.F. Balcan and H. 
Lin}, + pages = {13073--13087}, + publisher = {Curran Associates, Inc.}, + title = {Simulating a Primary Visual Cortex at the Front of CNNs Improves Robustness to Image Perturbations}, + url = {https://proceedings.neurips.cc/paper_files/paper/2020/file/98b17f068d5d9b7668e19fb8ae470841-Paper.pdf}, + volume = {33}, + year = {2020} +}""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/requirements.txt b/brainscore_vision/models/voneresnet_50_non_stochastic/requirements.txt new file mode 100644 index 000000000..0a596fa72 --- /dev/null +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/requirements.txt @@ -0,0 +1,8 @@ +torch +torchvision +numpy +pandas +scipy +tqdm +fire +requests diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/test.py b/brainscore_vision/models/voneresnet_50_non_stochastic/test.py new file mode 100644 index 000000000..0cf015bc9 --- /dev/null +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('voneresnet-50-non_stochastic') + assert model.identifier == 'voneresnet-50-non_stochastic' \ No newline at end of file diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/LICENSE b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/LICENSE new file mode 100644 index 000000000..f288702d2 --- /dev/null +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. 
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md new file mode 100644 index 000000000..756f30b1a --- /dev/null +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md @@ -0,0 +1,105 @@ + +# VOneNet: CNNs with a Primary Visual Cortex Front-End + +A family of biologically-inspired Convolutional Neural Networks (CNNs). VOneNets have the following features: +- Fixed-weight neural network model of the primate primary visual cortex (V1) as the front-end. +- Robust to image perturbations +- Brain-mapped +- Flexible: can be adapted to different back-end architectures + +[read more...](#longer-motivation) + +## Available Models +*(Click on model names to download the weights of ImageNet-trained models. 
Alternatively, you can use the function get_model in the vonenet package to download the weights.)*
+
+| Name | Description |
+| -------- | ------------------------------------------------------------------------ |
+| [VOneResNet50](https://vonenet-models.s3.us-east-2.amazonaws.com/voneresnet50_e70.pth.tar) | Our best performing VOneNet with a ResNet50 back-end |
+| [VOneCORnet-S](https://vonenet-models.s3.us-east-2.amazonaws.com/vonecornets_e70.pth.tar) | VOneNet with a recurrent neural network back-end based on CORnet-S |
+| [VOneAlexNet](https://vonenet-models.s3.us-east-2.amazonaws.com/vonealexnet_e70.pth.tar) | VOneNet with a back-end based on AlexNet |
+
+
+## Quick Start
+
+VOneNets were trained with images normalized with mean=[0.5, 0.5, 0.5] and std=[0.5, 0.5, 0.5].
+
+More information coming soon. In the meantime, see the minimal usage sketch below.
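+
+The snippet below is a minimal usage sketch rather than an official API example: it assumes the `vonenet` package
+is installed, uses the `get_model` function from this repository to fetch the pretrained VOneResNet50 weights, and
+applies the same preprocessing as the validation pipeline in `run.py`; the file name `example.jpg` is a placeholder.
+
+    import torch
+    import torchvision.transforms as transforms
+    from PIL import Image
+    from vonenet import get_model
+
+    # Download (on first use) and load the pretrained VOneResNet50; runs on CPU here
+    model = get_model(model_arch='resnet50', pretrained=True, map_location='cpu')
+    model.eval()
+
+    # Same preprocessing as the ImageNet validation loop in run.py
+    preprocess = transforms.Compose([
+        transforms.Resize(256),
+        transforms.CenterCrop(224),
+        transforms.ToTensor(),
+        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+    ])
+
+    # 'example.jpg' is a placeholder path for any RGB image
+    image = preprocess(Image.open('example.jpg').convert('RGB')).unsqueeze(0)
+    with torch.no_grad():
+        logits = model(image)  # shape [1, 1000]: ImageNet class scores
+    print(logits.argmax(dim=1).item())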
+
+
+## Longer Motivation
+
+Current state-of-the-art object recognition models are largely based on convolutional neural network (CNN) architectures, which are loosely inspired by the primate visual system. However, these CNNs can be fooled by imperceptibly small, explicitly crafted perturbations, and struggle to recognize objects in corrupted images that are easily recognized by humans. Recently, we observed that CNN models with a neural hidden layer that better matches primate primary visual cortex (V1) are also more robust to adversarial attacks. Inspired by this observation, we developed VOneNets, a new class of hybrid CNN vision models. Each VOneNet contains a fixed weight neural network front-end that simulates primate V1, called the VOneBlock, followed by a neural network back-end adapted from current CNN vision models. The VOneBlock is based on a classical neuroscientific model of V1: the linear-nonlinear-Poisson model, consisting of a biologically-constrained Gabor filter bank, simple and complex cell nonlinearities, and a V1 neuronal stochasticity generator. After training, VOneNets retain high ImageNet performance, but each is substantially more robust, outperforming the base CNNs and state-of-the-art methods by 18% and 3%, respectively, on a conglomerate benchmark of perturbations comprised of white box adversarial attacks and common image corruptions. Additionally, all components of the VOneBlock work in synergy to improve robustness.
+Read more: [Dapello\*, Marques\*, et al. (biorxiv, 2020)](https://doi.org/10.1101/2020.06.16.154542)
+
+
+
+## Requirements
+
+- Python 3.6+
+- PyTorch 0.4.1+
+- numpy
+- pandas
+- tqdm
+- scipy
+
+
+## Citation
+
+Dapello, J., Marques, T., Schrimpf, M., Geiger, F., Cox, D.D., DiCarlo, J.J. (2020) Simulating a Primary Visual Cortex at the Front of CNNs Improves Robustness to Image Perturbations. *biorxiv.* doi.org/10.1101/2020.06.16.154542
+
+
+## License
+
+GNU GPL 3+
+
+
+## FAQ
+
+Soon...
+
+## Setup and Run
+
+1. Clone the repository:
+
+       $ git clone https://github.com/dicarlolab/vonenet.git
+
+2. You also need a local copy of the ImageNet dataset containing a 'val' directory. The steps below are translated
+   from the Korean blog post https://seongkyun.github.io/others/2019/03/06/imagenet_dn/
+
+   **Download link**
+   https://academictorrents.com/collection/imagenet-2012
+
+   After downloading the large tar archives, unpack them as follows:
+
+       # Unpack the training set
+       $ mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train
+       $ tar -xvf ILSVRC2012_img_train.tar
+       $ rm -f ILSVRC2012_img_train.tar   # optional: remove the archive once it is extracted
+       $ find . -name "*.tar" | while read NAME ; do mkdir -p "${NAME%.tar}"; tar -xvf "${NAME}" -C "${NAME%.tar}"; rm -f "${NAME}"; done
+       $ cd ..
+
+       # Unpack the validation set
+       $ mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xvf ILSVRC2012_img_val.tar
+       $ wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash
+
+   When this finishes you should have 'train' and 'val' directories; the 'val' directory is required for the next step.
+
+   **Caution:** after unpacking, remove any file or directory whose name does not follow the 'n0000...' synset
+   naming pattern (for example, a leftover 'ILSVRC2012_img_train.tar' in the train directory or
+   'ILSVRC2012_img_val.tar' in the val directory), otherwise training will fail.
+
+3. Once the data is in place, go to the cloned repository and open a terminal (check that your Python, PyTorch and
+   cudatoolkit versions are compatible), then run:
+
+       $ python3 setup.py install
+       $ python3 run.py --in_path {directory containing the dataset; the 'val' directory must be inside it}
+
+       # If you run into GPU-related problems (e.g. 'GPU is not available') even though a GPU is present:
+       $ python3 run.py --in_path {directory containing the dataset; the 'val' directory must be inside it} --ngpus 0
+
+   --ngpus defaults to 1; pass --ngpus 0 if you are happy to run on the CPU.
+
+
+
diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/run.py b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/run.py
new file mode 100644
index 000000000..a2fcd5923
--- /dev/null
+++ b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/run.py
@@ -0,0 +1,136 @@
+
+import os, argparse, time, subprocess, io, shlex
+import pandas as pd
+import tqdm
+
+parser = argparse.ArgumentParser(description='ImageNet Validation')
+
+parser.add_argument('--in_path', required=True,
+                    help='path to ImageNet folder that contains val folder')
+parser.add_argument('--batch_size', default=128, type=int,
+                    help='size of batch for validation')
+parser.add_argument('--workers', default=20, type=int,
+                    help='number of data loading workers')
+parser.add_argument('--ngpus', default=1, type=int,
+                    help='number of GPUs to use; 0 if you want to run on CPU')
+parser.add_argument('--model_arch', choices=['alexnet', 'resnet50', 'resnet50_at', 'cornets'], default='resnet50',
+                    help='back-end model architecture to load')
+
+FLAGS, FIRE_FLAGS = parser.parse_known_args()
+
+
+def set_gpus(n=2):
+    """
+    Finds all GPUs on the system and restricts to n of them that have the most
+    free memory.
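+
+    Queries `nvidia-smi` for per-GPU free and total memory, keeps only GPUs with more than
+    10 GB of total memory, respects an existing CUDA_VISIBLE_DEVICES restriction if one is set,
+    and exposes the n GPUs with the most free memory through CUDA_VISIBLE_DEVICES
+    (with n=0, CUDA_VISIBLE_DEVICES is set to -1 so that no GPU is used).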
+ """ + if n > 0: + gpus = subprocess.run(shlex.split( + 'nvidia-smi --query-gpu=index,memory.free,memory.total --format=csv,nounits'), check=True, + stdout=subprocess.PIPE).stdout + gpus = pd.read_csv(io.BytesIO(gpus), sep=', ', engine='python') + gpus = gpus[gpus['memory.total [MiB]'] > 10000] # only above 10 GB + if os.environ.get('CUDA_VISIBLE_DEVICES') is not None: + visible = [int(i) + for i in os.environ['CUDA_VISIBLE_DEVICES'].split(',')] + gpus = gpus[gpus['index'].isin(visible)] + gpus = gpus.sort_values(by='memory.free [MiB]', ascending=False) + os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' # making sure GPUs are numbered the same way as in nvidia_smi + os.environ['CUDA_VISIBLE_DEVICES'] = ','.join( + [str(i) for i in gpus['index'].iloc[:n]]) + else: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + + +set_gpus(FLAGS.ngpus) + +import torch +import torch.nn as nn +import torchvision +from vonenet import get_model + +device = torch.device("cuda" if FLAGS.ngpus > 0 else "cpu") + + +def val(): + model = get_model(model_arch=FLAGS.model_arch, pretrained=True) + + if FLAGS.ngpus == 0: + print('Running on CPU') + if FLAGS.ngpus > 0 and torch.cuda.device_count() > 1: + print('Running on multiple GPUs') + model = model.to(device) + elif FLAGS.ngpus > 0 and torch.cuda.device_count() is 1: + print('Running on single GPU') + model = model.to(device) + else: + print('No GPU detected!') + model = model.module + + validator = ImageNetVal(model) + record = validator() + + print(record['top1']) + print(record['top5']) + return + + +class ImageNetVal(object): + + def __init__(self, model): + self.name = 'val' + self.model = model + self.data_loader = self.data() + self.loss = nn.CrossEntropyLoss(size_average=False) + self.loss = self.loss.to(device) + + def data(self): + dataset = torchvision.datasets.ImageFolder( + os.path.join(FLAGS.in_path, 'val'), + torchvision.transforms.Compose([ + torchvision.transforms.Resize(256), + torchvision.transforms.CenterCrop(224), + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], + std=[0.5, 0.5, 0.5]), + ])) + data_loader = torch.utils.data.DataLoader(dataset, + batch_size=FLAGS.batch_size, + shuffle=False, + num_workers=FLAGS.workers, + pin_memory=True) + + return data_loader + + def __call__(self): + self.model.eval() + start = time.time() + record = {'loss': 0, 'top1': 0, 'top5': 0} + with torch.no_grad(): + for (inp, target) in tqdm.tqdm(self.data_loader, desc=self.name): + target = target.to(device) + output = self.model(inp) + + record['loss'] += self.loss(output, target).item() + p1, p5 = accuracy(output, target, topk=(1, 5)) + record['top1'] += p1 + record['top5'] += p5 + + for key in record: + record[key] /= len(self.data_loader.dataset.samples) + record['dur'] = (time.time() - start) / len(self.data_loader) + + return record + + +def accuracy(output, target, topk=(1,)): + with torch.no_grad(): + _, pred = output.topk(max(topk), dim=1, largest=True, sorted=True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + res = [correct[:k].sum().item() for k in topk] + return res + + +if __name__ == '__main__': + val() diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/setup.py b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/setup.py new file mode 100644 index 000000000..c100c0534 --- /dev/null +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/setup.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools 
import setup + +with open('README.md') as readme_file: + readme = readme_file.read() + +requirements = [ + "torch>=0.4.0+", + "torchvision", + "numpy", + "pandas", + "scipy", + "tqdm", + "fire", + "requests", +] + +setup( + name='vonenet', + version='0.1.0', + description="CNNs with a Primary Visual Cortex Front-End ", + long_description=readme, + author="Tiago Marques, Joel Dapello", + author_email='tmarques@mit.edu, dapello@mit.edu', + url='https://github.com/dicarlolab/vonenet', + packages=['vonenet'], + include_package_data=True, + install_requires=requirements, + license="GNU GPL v3", + zip_safe=False, + keywords='VOneNet, Robustness, Primary Visual Cortex', + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: GNU GPL v3', + 'Natural Language :: English', + 'Programming Language :: Python :: 3.6' + ], +) diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/train.py b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/train.py new file mode 100644 index 000000000..fe9f8de30 --- /dev/null +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/train.py @@ -0,0 +1,383 @@ + +import os, argparse, time, subprocess, io, shlex, pickle, pprint +import pandas as pd +import numpy as np +import tqdm +import fire + +parser = argparse.ArgumentParser(description='ImageNet Training') +## General parameters +parser.add_argument('--in_path', required=True, + help='path to ImageNet folder that contains train and val folders') +parser.add_argument('-o', '--output_path', default=None, + help='path for storing ') +parser.add_argument('-restore_epoch', '--restore_epoch', default=0, type=int, + help='epoch number for restoring model training ') +parser.add_argument('-restore_path', '--restore_path', default=None, type=str, + help='path of folder containing specific epoch file for restoring model training') + +## Training parameters +parser.add_argument('--ngpus', default=0, type=int, + help='number of GPUs to use; 0 if you want to run on CPU') +parser.add_argument('-j', '--workers', default=20, type=int, + help='number of data loading workers') +parser.add_argument('--epochs', default=70, type=int, + help='number of total epochs to run') +parser.add_argument('--batch_size', default=256, type=int, + help='mini-batch size') +parser.add_argument('--optimizer', choices=['stepLR', 'plateauLR'], default='stepLR', + help='Optimizer') +parser.add_argument('--lr', '--learning_rate', default=.1, type=float, + help='initial learning rate') +parser.add_argument('--step_size', default=20, type=int, + help='after how many epochs learning rate should be decreased by step_factor') +parser.add_argument('--step_factor', default=0.1, type=float, + help='factor by which to decrease the learning rate') +parser.add_argument('--momentum', default=.9, type=float, help='momentum') +parser.add_argument('--weight_decay', default=1e-4, type=float, + help='weight decay ') + +## Model parameters +parser.add_argument('--torch_seed', default=0, type=int, + help='seed for weights initializations and torch RNG') +parser.add_argument('--model_arch', choices=['alexnet', 'resnet50', 'resnet50_at', 'cornets'], default='resnet50', + help='back-end model architecture to load') +parser.add_argument('--normalization', choices=['vonenet', 'imagenet'], default='vonenet', + help='image normalization to apply to models') +parser.add_argument('--visual_degrees', default=8, type=float, + help='Field-of-View of the model in visual 
degrees') + +## VOneBlock parameters +# Gabor filter bank +parser.add_argument('--stride', default=4, type=int, + help='stride for the first convolution (Gabor Filter Bank)') +parser.add_argument('--ksize', default=25, type=int, + help='kernel size for the first convolution (Gabor Filter Bank)') +parser.add_argument('--simple_channels', default=256, type=int, + help='number of simple channels in V1 block') +parser.add_argument('--complex_channels', default=256, type=int, + help='number of complex channels in V1 block') +parser.add_argument('--gabor_seed', default=0, type=int, + help='seed for gabor initialization') +parser.add_argument('--sf_corr', default=0.75, type=float, + help='') +parser.add_argument('--sf_max', default=6, type=float, + help='') +parser.add_argument('--sf_min', default=0, type=float, + help='') +parser.add_argument('--rand_param', choices=[True, False], default=False, type=bool, + help='random gabor params') +parser.add_argument('--k_exc', default=25, type=float, + help='') + +# Noise layer +parser.add_argument('--noise_mode', choices=['gaussian', 'neuronal', None], + default=None, + help='noise distribution') +parser.add_argument('--noise_scale', default=1, type=float, + help='noise scale factor') +parser.add_argument('--noise_level', default=1, type=float, + help='noise level') + + +FLAGS, FIRE_FLAGS = parser.parse_known_args() + + +def set_gpus(n=2): + """ + Finds all GPUs on the system and restricts to n of them that have the most + free memory. + """ + if n > 0: + gpus = subprocess.run(shlex.split( + 'nvidia-smi --query-gpu=index,memory.free,memory.total --format=csv,nounits'), check=True, + stdout=subprocess.PIPE).stdout + gpus = pd.read_csv(io.BytesIO(gpus), sep=', ', engine='python') + gpus = gpus[gpus['memory.total [MiB]'] > 10000] # only above 10 GB + if os.environ.get('CUDA_VISIBLE_DEVICES') is not None: + visible = [int(i) + for i in os.environ['CUDA_VISIBLE_DEVICES'].split(',')] + gpus = gpus[gpus['index'].isin(visible)] + gpus = gpus.sort_values(by='memory.free [MiB]', ascending=False) + os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' # making sure GPUs are numbered the same way as in nvidia_smi + os.environ['CUDA_VISIBLE_DEVICES'] = ','.join( + [str(i) for i in gpus['index'].iloc[:n]]) + else: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + + +if FLAGS.ngpus > 0: + set_gpus(FLAGS.ngpus) + +import torch +import torch.nn as nn +import torch.utils.model_zoo +import torchvision +from vonenet import get_model + +torch.manual_seed(FLAGS.torch_seed) + +torch.backends.cudnn.benchmark = True + +if FLAGS.ngpus > 0: + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +else: + device = 'cpu' + +if FLAGS.normalization == 'vonenet': + print('VOneNet normalization') + norm_mean = [0.5, 0.5, 0.5] + norm_std = [0.5, 0.5, 0.5] +elif FLAGS.normalization == 'imagenet': + print('Imagenet standard normalization') + norm_mean = [0.485, 0.456, 0.406] + norm_std = [0.229, 0.224, 0.225] + + +def load_model(): + map_location = None if FLAGS.ngpus > 0 else 'cpu' + print('Getting VOneNet') + model = get_model(map_location=map_location, model_arch=FLAGS.model_arch, pretrained=False, + visual_degrees=FLAGS.visual_degrees, stride=FLAGS.stride, ksize=FLAGS.ksize, + sf_corr=FLAGS.sf_corr, sf_max=FLAGS.sf_max, sf_min=FLAGS.sf_min, rand_param=FLAGS.rand_param, + gabor_seed=FLAGS.gabor_seed, simple_channels=FLAGS.simple_channels, + complex_channels=FLAGS.simple_channels, noise_mode=FLAGS.noise_mode, + noise_scale=FLAGS.noise_scale, noise_level=FLAGS.noise_level, 
k_exc=FLAGS.k_exc) + + if FLAGS.ngpus > 0 and torch.cuda.device_count() > 1: + print('We have multiple GPUs detected') + model = model.to(device) + elif FLAGS.ngpus > 0 and torch.cuda.device_count() is 1: + print('We run on GPU') + model = model.to(device) + else: + print('No GPU detected!') + model = model.module + + return model + + +def train(save_train_epochs=.2, # how often save output during training + save_val_epochs=.5, # how often save output during validation + save_model_epochs=1, # how often save model weights + save_model_secs=720 * 10 # how often save model (in sec) + ): + + model = load_model() + + trainer = ImageNetTrain(model) + validator = ImageNetVal(model) + + start_epoch = 0 + records = [] + + if FLAGS.restore_epoch > 0: + print('Restoring from previous...') + ckpt_data = torch.load(os.path.join(FLAGS.restore_path, f'epoch_{FLAGS.restore_epoch:02d}.pth.tar')) + start_epoch = ckpt_data['epoch'] + print('Loaded epoch: '+str(start_epoch)) + model.load_state_dict(ckpt_data['state_dict']) + trainer.optimizer.load_state_dict(ckpt_data['optimizer']) + results_old = pickle.load(open(os.path.join(FLAGS.restore_path, 'results.pkl'), 'rb')) + for result in results_old: + records.append(result) + + results = {'meta': {'step_in_epoch': 0, + 'epoch': start_epoch, + 'wall_time': time.time()} + } + + # records = [] + recent_time = time.time() + + nsteps = len(trainer.data_loader) + + if save_train_epochs is not None: + save_train_steps = (np.arange(0, FLAGS.epochs + 1, + save_train_epochs) * nsteps).astype(int) + if save_val_epochs is not None: + save_val_steps = (np.arange(0, FLAGS.epochs + 1, + save_val_epochs) * nsteps).astype(int) + if save_model_epochs is not None: + save_model_steps = (np.arange(0, FLAGS.epochs + 1, + save_model_epochs) * nsteps).astype(int) + + for epoch in tqdm.trange(start_epoch, FLAGS.epochs + 1, initial=0, desc='epoch'): + print(epoch) + data_load_start = np.nan + + data_loader_iter = trainer.data_loader + + for step, data in enumerate(tqdm.tqdm(data_loader_iter, desc=trainer.name)): + data_load_time = time.time() - data_load_start + global_step = epoch * nsteps + step + + if save_val_steps is not None: + if global_step in save_val_steps: + results[validator.name] = validator() + if FLAGS.optimizer == 'plateauLR' and step == 0: + trainer.lr.step(results[validator.name]['loss']) + trainer.model.train() + print('LR: ', trainer.optimizer.param_groups[0]["lr"]) + + if FLAGS.output_path is not None: + if not (os.path.isdir(FLAGS.output_path)): + os.mkdir(FLAGS.output_path) + + records.append(results) + if len(results) > 1: + pickle.dump(records, open(os.path.join(FLAGS.output_path, 'results.pkl'), 'wb')) + + ckpt_data = {} + ckpt_data['flags'] = FLAGS.__dict__.copy() + ckpt_data['epoch'] = epoch + ckpt_data['state_dict'] = model.state_dict() + ckpt_data['optimizer'] = trainer.optimizer.state_dict() + + if save_model_secs is not None: + if time.time() - recent_time > save_model_secs: + torch.save(ckpt_data, os.path.join(FLAGS.output_path, + 'latest_checkpoint.pth.tar')) + recent_time = time.time() + + if save_model_steps is not None: + if global_step in save_model_steps: + torch.save(ckpt_data, os.path.join(FLAGS.output_path, + f'epoch_{epoch:02d}.pth.tar')) + + else: + if len(results) > 1: + pprint.pprint(results) + + if epoch < FLAGS.epochs: + frac_epoch = (global_step + 1) / nsteps + record = trainer(frac_epoch, *data) + record['data_load_dur'] = data_load_time + results = {'meta': {'step_in_epoch': step + 1, + 'epoch': frac_epoch, + 'wall_time': time.time()} 
+ } + if save_train_steps is not None: + if step in save_train_steps: + results[trainer.name] = record + + data_load_start = time.time() + + +class ImageNetTrain(object): + + def __init__(self, model): + self.name = 'train' + self.model = model + self.data_loader = self.data() + self.optimizer = torch.optim.SGD(self.model.parameters(), FLAGS.lr, momentum=FLAGS.momentum, + weight_decay=FLAGS.weight_decay) + if FLAGS.optimizer == 'stepLR': + self.lr = torch.optim.lr_scheduler.StepLR(self.optimizer, gamma=FLAGS.step_factor, + step_size=FLAGS.step_size) + elif FLAGS.optimizer == 'plateauLR': + self.lr = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, factor=FLAGS.step_factor, + patience=FLAGS.step_size-1, threshold=0.01) + self.loss = nn.CrossEntropyLoss() + if FLAGS.ngpus > 0: + self.loss = self.loss.cuda() + + def data(self): + dataset = torchvision.datasets.ImageFolder( + os.path.join(FLAGS.in_path, 'train'), + torchvision.transforms.Compose([ + torchvision.transforms.RandomResizedCrop(224), + torchvision.transforms.RandomHorizontalFlip(), + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize(mean=norm_mean, std=norm_std) + ])) + data_loader = torch.utils.data.DataLoader(dataset, + batch_size=FLAGS.batch_size, + shuffle=True, + num_workers=FLAGS.workers, + pin_memory=True) + + return data_loader + + def __call__(self, frac_epoch, inp, target): + start = time.time() + if FLAGS.optimizer == 'stepLR': + self.lr.step(epoch=frac_epoch) + target = target.to(device) + + output = self.model(inp) + + record = {} + loss = self.loss(output, target) + record['loss'] = loss.item() + record['top1'], record['top5'] = accuracy(output, target, topk=(1, 5)) + record['top1'] /= len(output) + record['top5'] /= len(output) + # record['learning_rate'] = self.lr.get_lr()[0] + record['learning_rate'] = self.optimizer.param_groups[0]["lr"] + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + + record['dur'] = time.time() - start + return record + + +class ImageNetVal(object): + + def __init__(self, model): + self.name = 'val' + self.model = model + self.data_loader = self.data() + self.loss = nn.CrossEntropyLoss(size_average=False) + self.loss = self.loss.to(device) + + def data(self): + dataset = torchvision.datasets.ImageFolder( + os.path.join(FLAGS.in_path, 'val'), + torchvision.transforms.Compose([ + torchvision.transforms.Resize(256), + torchvision.transforms.CenterCrop(224), + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize(mean=norm_mean, std=norm_std), + ])) + data_loader = torch.utils.data.DataLoader(dataset, + batch_size=FLAGS.batch_size, + shuffle=False, + num_workers=FLAGS.workers, + pin_memory=True) + + return data_loader + + def __call__(self): + self.model.eval() + start = time.time() + record = {'loss': 0, 'top1': 0, 'top5': 0} + with torch.no_grad(): + for (inp, target) in tqdm.tqdm(self.data_loader, desc=self.name): + target = target.to(device) + output = self.model(inp) + + record['loss'] += self.loss(output, target).item() + p1, p5 = accuracy(output, target, topk=(1, 5)) + record['top1'] += p1 + record['top5'] += p5 + + for key in record: + record[key] /= len(self.data_loader.dataset.samples) + record['dur'] = (time.time() - start) / len(self.data_loader) + + return record + + +def accuracy(output, target, topk=(1,)): + with torch.no_grad(): + _, pred = output.topk(max(topk), dim=1, largest=True, sorted=True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + res = [correct[:k].sum().item() for k in 
topk] + return res + + +if __name__ == '__main__': + fire.Fire(command=FIRE_FLAGS) diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/__init__.py b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/__init__.py new file mode 100644 index 000000000..75f6ca349 --- /dev/null +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/__init__.py @@ -0,0 +1,71 @@ +import torch +import torch.nn as nn +import os +import requests + +from .vonenet import VOneNet +from torch.nn import Module + +FILE_WEIGHTS = {'alexnet': 'vonealexnet_e70.pth.tar', 'resnet50': 'voneresnet50_e70.pth.tar', + 'resnet50_at': 'voneresnet50_at_e96.pth.tar', 'cornets': 'vonecornets_e70.pth.tar', + 'resnet50_ns': 'voneresnet50_ns_e70.pth.tar'} + + +class Wrapper(Module): + def __init__(self, model): + super(Wrapper, self).__init__() + self.module = model + + +def get_model(model_arch='resnet50', pretrained=True, map_location='cpu', **kwargs): + """ + Returns a VOneNet model. + Select pretrained=True for returning one of the 3 pretrained models. + model_arch: string with identifier to choose the architecture of the back-end (resnet50, cornets, alexnet) + """ + if pretrained and model_arch: + url = f'https://vonenet-models.s3.us-east-2.amazonaws.com/{FILE_WEIGHTS[model_arch.lower()]}' + home_dir = os.environ['HOME'] + vonenet_dir = os.path.join(home_dir, '.vonenet') + weightsdir_path = os.path.join(vonenet_dir, FILE_WEIGHTS[model_arch.lower()]) + if not os.path.exists(vonenet_dir): + os.makedirs(vonenet_dir) + if not os.path.exists(weightsdir_path): + print('Downloading model weights to ', weightsdir_path) + r = requests.get(url, allow_redirects=True) + open(weightsdir_path, 'wb').write(r.content) + + ckpt_data = torch.load(weightsdir_path, map_location=map_location) + + stride = ckpt_data['flags']['stride'] + simple_channels = ckpt_data['flags']['simple_channels'] + complex_channels = ckpt_data['flags']['complex_channels'] + k_exc = ckpt_data['flags']['k_exc'] + + noise_mode = ckpt_data['flags']['noise_mode'] + noise_scale = ckpt_data['flags']['noise_scale'] + noise_level = ckpt_data['flags']['noise_level'] + + model_id = ckpt_data['flags']['arch'].replace('_','').lower() + + model = globals()[f'VOneNet'](model_arch=model_id, stride=stride, k_exc=k_exc, + simple_channels=simple_channels, complex_channels=complex_channels, + noise_mode=noise_mode, noise_scale=noise_scale, noise_level=noise_level) + + if model_arch.lower() == 'resnet50_at': + ckpt_data['state_dict'].pop('vone_block.div_u.weight') + ckpt_data['state_dict'].pop('vone_block.div_t.weight') + model.load_state_dict(ckpt_data['state_dict']) + else: + model = Wrapper(model) + model.load_state_dict(ckpt_data['state_dict']) + model = model.module + + model = nn.DataParallel(model) + else: + model = globals()[f'VOneNet'](model_arch=model_arch, **kwargs) + model = nn.DataParallel(model) + + model.to(map_location) + return model + diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/back_ends.py b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/back_ends.py new file mode 100644 index 000000000..a210e28d9 --- /dev/null +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/back_ends.py @@ -0,0 +1,337 @@ + +import numpy as np +import torch +from torch import nn +from collections import OrderedDict + + +# AlexNet Back-End architecture +# Based on Torchvision implementation in +# 
https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py +class AlexNetBackEnd(nn.Module): + def __init__(self, num_classes=1000): + super().__init__() + self.features = nn.Sequential( + nn.Conv2d(64, 192, kernel_size=5, stride=2, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1), + nn.Conv2d(192, 384, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(384, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1), + ) + self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) + self.classifier = nn.Sequential( + nn.Dropout(), + nn.Linear(256 * 7 * 7, 4096), + nn.ReLU(inplace=True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(inplace=True), + nn.Linear(4096, num_classes), + ) + + def forward(self, x): + x = self.features(x) + x = self.avgpool(x) + x = torch.flatten(x, 1) + x = self.classifier(x) + return x + + +# ResNet Back-End architecture +# Based on Torchvision implementation in +# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=dilation, groups=groups, bias=False, dilation=dilation) + + +def conv1x1(in_planes, out_planes, stride=1): + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + __constants__ = ['downsample'] + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) # + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + __constants__ = ['downsample'] + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) * groups + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) # inplace=True + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = 
self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNetBackEnd(nn.Module): + def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, + groups=1, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None): + super(ResNetBackEnd, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, + dilate=replace_stride_with_dilation[2]) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, previous_dilation, norm_layer)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + # See note [TorchScript super()] + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = torch.flatten(x, 1) + x = self.fc(x) + + return x + + def forward(self, x): + return self._forward_impl(x) + + +# CORnet-S Back-End architecture +# Based on CORnet code in +# https://github.com/dicarlolab/CORnet +class Flatten(nn.Module): + def forward(self, x): + return x.view(x.size(0), -1) + + +class Identity(nn.Module): + def forward(self, x): + return x + + +class CORblock_S(nn.Module): + + scale = 4 # scale of the bottleneck convolution channels + + def __init__(self, in_channels, out_channels, times=1): + super().__init__() + + self.times = times + + self.conv_input = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False) + self.skip = nn.Conv2d(out_channels, out_channels, + kernel_size=1, stride=2, bias=False) + self.norm_skip = nn.BatchNorm2d(out_channels) + + self.conv1 = nn.Conv2d(out_channels, out_channels * self.scale, + kernel_size=1, bias=False) + self.nonlin1 = nn.ReLU(inplace=True) # + + self.conv2 = nn.Conv2d(out_channels * self.scale, out_channels * self.scale, + kernel_size=3, stride=2, padding=1, bias=False) + self.nonlin2 = nn.ReLU(inplace=True) # + + self.conv3 = nn.Conv2d(out_channels * self.scale, out_channels, + kernel_size=1, bias=False) + self.nonlin3 = nn.ReLU(inplace=True) # + + self.output = Identity() # for an easy access to this block's output + + # need BatchNorm for each time step for training to work well + for t in range(self.times): + setattr(self, f'norm1_{t}', nn.BatchNorm2d(out_channels * self.scale)) + setattr(self, f'norm2_{t}', nn.BatchNorm2d(out_channels * self.scale)) + setattr(self, f'norm3_{t}', nn.BatchNorm2d(out_channels)) + + def forward(self, inp): + x = self.conv_input(inp) + + for t in range(self.times): + if t == 0: + skip = self.norm_skip(self.skip(x)) + self.conv2.stride = (2, 2) + else: + skip = x + self.conv2.stride = (1, 1) + + x = self.conv1(x) + x = getattr(self, f'norm1_{t}')(x) + x = self.nonlin1(x) + + x = self.conv2(x) + x = getattr(self, f'norm2_{t}')(x) + x = self.nonlin2(x) + + x = self.conv3(x) + x = getattr(self, f'norm3_{t}')(x) + + x += skip + x = self.nonlin3(x) + output = self.output(x) + + return output + + +class CORnetSBackEnd(nn.Module): + def __init__(self, num_classes=1000): + super(CORnetSBackEnd, self).__init__() + + self.V2 = CORblock_S(64, 128, times=2) 
+ self.V4 = CORblock_S(128, 256, times=4) + self.IT = CORblock_S(256, 512, times=2) + self.decoder = nn.Sequential(OrderedDict([ + ('avgpool', nn.AdaptiveAvgPool2d(1)), + ('flatten', Flatten()), + ('linear', nn.Linear(512, num_classes)), + ('output', Identity()) + ])) + + # weight initialization + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, np.sqrt(2. / n)) + # nn.Linear is missing here because I originally forgot + # to add it during the training of this network + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def forward(self, x): + x = self.V2(x) + x = self.V4(x) + x = self.IT(x) + x = self.decoder(x) + return x diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py new file mode 100644 index 000000000..fe5e53491 --- /dev/null +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py @@ -0,0 +1,126 @@ + +import numpy as np +import torch +from torch import nn +from torch.nn import functional as F +from .utils import gabor_kernel + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + +class Identity(nn.Module): + def forward(self, x): + return x + + +class GFB(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride=4): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = (kernel_size, kernel_size) + self.stride = (stride, stride) + self.padding = (kernel_size // 2, kernel_size // 2) + + # Param instatiations + self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size)) + + def forward(self, x): + return F.conv2d(x, self.weight, None, self.stride, self.padding) + + def initialize(self, sf, theta, sigx, sigy, phase): + random_channel = torch.randint(0, self.in_channels, (self.out_channels,)) + for i in range(self.out_channels): + self.weight[i, random_channel[i]] = gabor_kernel(frequency=sf[i], sigma_x=sigx[i], sigma_y=sigy[i], + theta=theta[i], offset=phase[i], ks=self.kernel_size[0]) + self.weight = nn.Parameter(self.weight, requires_grad=False) + + +class VOneBlock(nn.Module): + def __init__(self, sf, theta, sigx, sigy, phase, + k_exc=25, noise_mode=None, noise_scale=1, noise_level=1, + simple_channels=128, complex_channels=128, ksize=25, stride=4, input_size=224): + super().__init__() + + self.in_channels = 3 + + self.simple_channels = simple_channels + self.complex_channels = complex_channels + self.out_channels = simple_channels + complex_channels + self.stride = stride + self.input_size = input_size + + self.sf = sf + self.theta = theta + self.sigx = sigx + self.sigy = sigy + self.phase = phase + self.k_exc = k_exc + + self.set_noise_mode(noise_mode, noise_scale, noise_level) + self.fixed_noise = None + + self.simple_conv_q0 = GFB(self.in_channels, self.out_channels, ksize, stride) + self.simple_conv_q1 = GFB(self.in_channels, self.out_channels, ksize, stride) + self.simple_conv_q0.initialize(sf=self.sf, theta=self.theta, sigx=self.sigx, sigy=self.sigy, + phase=self.phase) + self.simple_conv_q1.initialize(sf=self.sf, theta=self.theta, sigx=self.sigx, sigy=self.sigy, + phase=self.phase + np.pi / 2) + + self.simple = nn.ReLU(inplace=True) + self.complex = Identity() + self.gabors = Identity() + self.noise = nn.ReLU(inplace=True) + self.output = Identity() + + def forward(self, x): 
+ # Gabor activations [Batch, out_channels, H/stride, W/stride] + x = self.gabors_f(x) + # Noise [Batch, out_channels, H/stride, W/stride] + x = self.noise_f(x) + # V1 Block output: (Batch, out_channels, H/stride, W/stride) + x = self.output(x) + return x + + def gabors_f(self, x): + s_q0 = self.simple_conv_q0(x) + s_q1 = self.simple_conv_q1(x) + c = self.complex(torch.sqrt(s_q0[:, self.simple_channels:, :, :] ** 2 + + s_q1[:, self.simple_channels:, :, :] ** 2) / np.sqrt(2)) + s = self.simple(s_q0[:, 0:self.simple_channels, :, :]) + return self.gabors(self.k_exc * torch.cat((s, c), 1)) + + def noise_f(self, x): + if self.noise_mode == 'neuronal': + eps = 10e-5 + x *= self.noise_scale + x += self.noise_level + if self.fixed_noise is not None: + x += self.fixed_noise * torch.sqrt(F.relu(x.clone()) + eps) + else: + x += torch.distributions.normal.Normal(torch.zeros_like(x), scale=1).rsample() * \ + torch.sqrt(F.relu(x.clone()) + eps) + x -= self.noise_level + x /= self.noise_scale + if self.noise_mode == 'gaussian': + if self.fixed_noise is not None: + x += self.fixed_noise * self.noise_scale + else: + x += torch.distributions.normal.Normal(torch.zeros_like(x), scale=1).rsample() * self.noise_scale + return self.noise(x) + + def set_noise_mode(self, noise_mode=None, noise_scale=1, noise_level=1): + self.noise_mode = noise_mode + self.noise_scale = noise_scale + self.noise_level = noise_level + + def fix_noise(self, batch_size=256, seed=None): + noise_mean = torch.zeros(batch_size, self.out_channels, int(self.input_size/self.stride), + int(self.input_size/self.stride)) + if seed: + torch.manual_seed(seed) + if self.noise_mode: + self.fixed_noise = torch.distributions.normal.Normal(noise_mean, scale=1).rsample().to(device) + + def unfix_noise(self): + self.fixed_noise = None diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/params.py b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/params.py new file mode 100644 index 000000000..76f2e7590 --- /dev/null +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/params.py @@ -0,0 +1,100 @@ + +import numpy as np +from .utils import sample_dist +import scipy.stats as stats + + +def generate_gabor_param(features, seed=0, rand_flag=False, sf_corr=0, sf_max=9, sf_min=0): + # Generates random sample + np.random.seed(seed) + + phase_bins = np.array([0, 360]) + phase_dist = np.array([1]) + + if rand_flag: + print('Uniform gabor parameters') + ori_bins = np.array([0, 180]) + ori_dist = np.array([1]) + + nx_bins = np.array([0.1, 10**0.2]) + nx_dist = np.array([1]) + + ny_bins = np.array([0.1, 10**0.2]) + ny_dist = np.array([1]) + + # sf_bins = np.array([0.5, 8]) + # sf_dist = np.array([1]) + + sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8]) + sf_dist = np.array([1, 1, 1, 1, 1, 1, 1, 1]) + + sfmax_ind = np.where(sf_bins < sf_max)[0][-1] + sfmin_ind = np.where(sf_bins >= sf_min)[0][0] + + sf_bins = sf_bins[sfmin_ind:sfmax_ind+1] + sf_dist = sf_dist[sfmin_ind:sfmax_ind] + + sf_dist = sf_dist / sf_dist.sum() + else: + print('Neuronal distributions gabor parameters') + # DeValois 1982a + ori_bins = np.array([-22.5, 22.5, 67.5, 112.5, 157.5]) + ori_dist = np.array([66, 49, 77, 54]) + ori_dist = ori_dist / ori_dist.sum() + + # Schiller 1976 + cov_mat = np.array([[1, sf_corr], [sf_corr, 1]]) + + # Ringach 2002b + nx_bins = np.logspace(-1, 0.2, 6, base=10) + ny_bins = np.logspace(-1, 0.2, 6, base=10) + n_joint_dist = np.array([[2., 0., 1., 0., 0.], + [8., 9., 4., 1., 0.], + 
[1., 2., 19., 17., 3.], + [0., 0., 1., 7., 4.], + [0., 0., 0., 0., 0.]]) + n_joint_dist = n_joint_dist / n_joint_dist.sum() + nx_dist = n_joint_dist.sum(axis=1) + nx_dist = nx_dist / nx_dist.sum() + ny_dist_marg = n_joint_dist / n_joint_dist.sum(axis=1, keepdims=True) + + # DeValois 1982b + sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8]) + sf_dist = np.array([4, 4, 8, 25, 32, 26, 28, 12]) + + sfmax_ind = np.where(sf_bins <= sf_max)[0][-1] + sfmin_ind = np.where(sf_bins >= sf_min)[0][0] + + sf_bins = sf_bins[sfmin_ind:sfmax_ind+1] + sf_dist = sf_dist[sfmin_ind:sfmax_ind] + + sf_dist = sf_dist / sf_dist.sum() + + phase = sample_dist(phase_dist, phase_bins, features) + ori = sample_dist(ori_dist, ori_bins, features) + ori[ori < 0] = ori[ori < 0] + 180 + + if rand_flag: + sf = sample_dist(sf_dist, sf_bins, features, scale='log2') + nx = sample_dist(nx_dist, nx_bins, features, scale='log10') + ny = sample_dist(ny_dist, ny_bins, features, scale='log10') + else: + + samps = np.random.multivariate_normal([0, 0], cov_mat, features) + samps_cdf = stats.norm.cdf(samps) + + nx = np.interp(samps_cdf[:,0], np.hstack(([0], nx_dist.cumsum())), np.log10(nx_bins)) + nx = 10**nx + + ny_samp = np.random.rand(features) + ny = np.zeros(features) + for samp_ind, nx_samp in enumerate(nx): + bin_id = np.argwhere(nx_bins < nx_samp)[-1] + ny[samp_ind] = np.interp(ny_samp[samp_ind], np.hstack(([0], ny_dist_marg[bin_id, :].cumsum())), + np.log10(ny_bins)) + ny = 10**ny + + sf = np.interp(samps_cdf[:,1], np.hstack(([0], sf_dist.cumsum())), np.log2(sf_bins)) + sf = 2**sf + + return sf, ori, phase, nx, ny diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/utils.py b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/utils.py new file mode 100644 index 000000000..9198bf809 --- /dev/null +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/utils.py @@ -0,0 +1,32 @@ + +import numpy as np +import torch + + +def gabor_kernel(frequency, sigma_x, sigma_y, theta=0, offset=0, ks=61): + + w = ks // 2 + grid_val = torch.arange(-w, w+1, dtype=torch.float) + x, y = torch.meshgrid(grid_val, grid_val) + rotx = x * np.cos(theta) + y * np.sin(theta) + roty = -x * np.sin(theta) + y * np.cos(theta) + g = torch.zeros(y.shape) + g[:] = torch.exp(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2)) + g /= 2 * np.pi * sigma_x * sigma_y + g *= torch.cos(2 * np.pi * frequency * rotx + offset) + + return g + + +def sample_dist(hist, bins, ns, scale='linear'): + rand_sample = np.random.rand(ns) + if scale == 'linear': + rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), bins) + elif scale == 'log2': + rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), np.log2(bins)) + rand_sample = 2**rand_sample + elif scale == 'log10': + rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), np.log10(bins)) + rand_sample = 10**rand_sample + return rand_sample + diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/vonenet.py b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/vonenet.py new file mode 100644 index 000000000..ce2d54f9d --- /dev/null +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/vonenet.py @@ -0,0 +1,68 @@ + +from collections import OrderedDict +from torch import nn +from .modules import VOneBlock +from .back_ends import ResNetBackEnd, Bottleneck, AlexNetBackEnd, CORnetSBackEnd +from .params import generate_gabor_param 
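+# Note: generate_gabor_param returns spatial frequencies in cycles/degree and the
+# dimensionless envelope sizes nx, ny; VOneNet() below converts these into pixel
+# units with the pixels-per-degree factor ppd = image_size / visual_degrees.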
+import numpy as np + + +def VOneNet(sf_corr=0.75, sf_max=9, sf_min=0, rand_param=False, gabor_seed=0, + simple_channels=256, complex_channels=256, + noise_mode='neuronal', noise_scale=0.35, noise_level=0.07, k_exc=25, + model_arch='resnet50', image_size=224, visual_degrees=8, ksize=25, stride=4): + + + out_channels = simple_channels + complex_channels + + sf, theta, phase, nx, ny = generate_gabor_param(out_channels, gabor_seed, rand_param, sf_corr, sf_max, sf_min) + + gabor_params = {'simple_channels': simple_channels, 'complex_channels': complex_channels, 'rand_param': rand_param, + 'gabor_seed': gabor_seed, 'sf_max': sf_max, 'sf_corr': sf_corr, 'sf': sf.copy(), + 'theta': theta.copy(), 'phase': phase.copy(), 'nx': nx.copy(), 'ny': ny.copy()} + arch_params = {'k_exc': k_exc, 'arch': model_arch, 'ksize': ksize, 'stride': stride} + + + # Conversions + ppd = image_size / visual_degrees + + sf = sf / ppd + sigx = nx / sf + sigy = ny / sf + theta = theta/180 * np.pi + phase = phase / 180 * np.pi + + vone_block = VOneBlock(sf=sf, theta=theta, sigx=sigx, sigy=sigy, phase=phase, + k_exc=k_exc, noise_mode=noise_mode, noise_scale=noise_scale, noise_level=noise_level, + simple_channels=simple_channels, complex_channels=complex_channels, + ksize=ksize, stride=stride, input_size=image_size) + + if model_arch: + bottleneck = nn.Conv2d(out_channels, 64, kernel_size=1, stride=1, bias=False) + nn.init.kaiming_normal_(bottleneck.weight, mode='fan_out', nonlinearity='relu') + + if model_arch.lower() == 'resnet50': + print('Model: ', 'VOneResnet50') + model_back_end = ResNetBackEnd(block=Bottleneck, layers=[3, 4, 6, 3]) + elif model_arch.lower() == 'alexnet': + print('Model: ', 'VOneAlexNet') + model_back_end = AlexNetBackEnd() + elif model_arch.lower() == 'cornets': + print('Model: ', 'VOneCORnet-S') + model_back_end = CORnetSBackEnd() + + model = nn.Sequential(OrderedDict([ + ('vone_block', vone_block), + ('bottleneck', bottleneck), + ('model', model_back_end), + ])) + else: + print('Model: ', 'VOneNet') + model = vone_block + + model.image_size = image_size + model.visual_degrees = visual_degrees + model.gabor_params = gabor_params + model.arch_params = arch_params + + return model diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet_tutorial-activations.ipynb b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet_tutorial-activations.ipynb new file mode 100644 index 000000000..cac6d513a --- /dev/null +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet_tutorial-activations.ipynb @@ -0,0 +1,352 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 83, + "metadata": {}, + "outputs": [], + "source": [ + "import vonenet\n", + "import torchvision\n", + "import torch\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 84, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Neuronal distributions gabor parameters\n", + "Model: VOneNet\n", + "VOneBlock(\n", + " (simple_conv_q0): GFB()\n", + " (simple_conv_q1): GFB()\n", + " (simple): ReLU(inplace=True)\n", + " (complex): Identity()\n", + " (gabors): Identity()\n", + " (noise): ReLU(inplace=True)\n", + " (output): Identity()\n", + ")\n" + ] + } + ], + "source": [ + "# Load V1 model\n", + "v1_model = vonenet.get_model(model_arch=None, pretrained=False, noise_mode=None).module\n", + "\n", + "# v1_model = vonenet.get_model(model_arch=None, pretrained=False, 
noise_mode=None, image_size=32, visual_degrees=3, sf_max=5, stride=1, ksize=15).module\n", + "# v1_model = vonenet.get_model(model_arch='resnet50_ns', pretrained=True).module\n", + "\n", + "print(v1_model)" + ] + }, + { + "cell_type": "code", + "execution_count": 99, + "metadata": {}, + "outputs": [], + "source": [ + "data_path = '/braintree/data2/active/common/imagenet_raw/val'\n", + "\n", + "bsize = 16\n", + "crop = 256 # 48 256\n", + "px = 224 # 32 224\n", + "\n", + "normalize = torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5],\n", + " std=[0.5, 0.5, 0.5])\n", + "dataset = torchvision.datasets.ImageFolder(data_path,\n", + " torchvision.transforms.Compose([\n", + " torchvision.transforms.Resize(crop), \n", + " torchvision.transforms.CenterCrop(px), \n", + " torchvision.transforms.ToTensor(),\n", + " normalize,\n", + " ]))\n", + "\n", + "data_loader = torch.utils.data.DataLoader(dataset, batch_size=bsize, shuffle=True, num_workers=20, pin_memory=True)\n", + "\n", + "dataloader_iterator = iter(data_loader)" + ] + }, + { + "cell_type": "code", + "execution_count": 100, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([16, 3, 224, 224])\n" + ] + } + ], + "source": [ + "X, _ = next(dataloader_iterator)\n", + "print(X.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 101, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([16, 512, 56, 56])\n" + ] + } + ], + "source": [ + "activations = v1_model(X)\n", + "print(activations.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 102, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYoAAAGKCAYAAAASfgYQAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOy96a8lSXYf9ovIu7yl9q33vXump2c4O4dDcRMl2jQh25QhyYZhAwYMGP7i/8iAAX8QDMiWbNmiFkKUSIriMjOkhrP0dM/0Vt1dXdXVVdW1vOW+ezMj/CHOOXEiMm7e+6peLT2KA1TlfZmREZGRkRHnnN9ZjPcelSpVqlSp0jKyD7sDlSpVqlTp0aa6UVSqVKlSpUGqG0WlSpUqVRqkulFUqlSpUqVBqhtFpUqVKlUapNHQxed/63EyiTIwNvy0De0thq2l+lZT1g7vP967pA4DA7G+cobKeFXe0DUP1/m0WarKOQB0zXVcR7zXt3TNmVAWgOe6Wm7HY8gKbOiaMTJUMMbwSemqbRoAgB2FI6wJ/wA0fI3GzVoLo34DgBlbKW/42AAmu9fI0UjfrPz2sZyJ12UQpRSNFTwcDZbxPH4uDCwA1zkZF0+/XddxJeHg4pj6FvGa43dKp1onbXlH9bfUdhfr6OidhTb5fZjYR3lFJnlOYwCeRjDxGp/jcYQxanz7YyVzm865pWNKdRXO+exUqIi/Bcjz0Q8Zl67jieqkvEcYb9d1cTx4HDsndcT6qBvey5yVcVfluN+lOd97JiBZBUrXh+qI76VwjfuDUpv6mXgO9O89LOmW4rdMJ6w6b7My0OPW9esdGJfiOK/R11XPaFycE7FsXB/1EQBuXLxTbLZKFJUqVapUaZAGJYq4pXkwz+BIGtDMmVEcmj4urzdy2vy3YeaHubnYJBeHNwaWTzJHyuVbD0e94t3PuyhROGbkVTdEUrGRe9Lc1X+sxM/uvOtJFN574fjjDUjZEihuj69n5XMO13svQmo+9B76fcR+xIrX4b2GKXnf0lTaEWOM4nBj0zkX/qhR2i/Noa+e46Vn0udc6ZGze4rfkpYe8jpK41iQMmIl6reP3/Jhv+Ao2Kzm/PVciF3sn3v4JCIk0m9HT2+DVd/Q8EaxTgcKDfSnZbkferPxvMnwwyBOIKPH3pp4E6Ko6UZhswg3R1ULqzZEFeNVfaROk93D9T+MR+/FP1xKVYJq8/DpJEwW29657Lf8zCcyfZRL3gGfflDrs/c+qhYK5x4dyhi2ZPzUt3HYWo/gOddmJg/Rj1XqscN+w2XVWsYIed9b0x7JuSB7aWFVLjAQy6iqnipVqlSp0iANSxRaHDDZqXgFCscNR71TpRIhnUp3XQ0MpbhqCiSZEmil2rZNWoWBCZKJIud9VE3xNRuByZzTfTTFyQdDRoGx+t3m41ES8qNxQrxmtGDh03NOAdEmK6/rS49H916OQvX0qKqglqme1pnWq1RPUQOwXvliG/l3vmYdyXgPPMu676O0fh3GuCWZH4/IFOCxLfbnEJ2sEkWlSpUqVRqkFWA2X/bRZNKkHJXxRrh7S9esAgKYu/fGi1RhM3s446OMkZoOphyadx4+YUsBY6JpIpvwQjg8EzkdOmetElq4nNUcEv+Oppqae5S2s90456x0cSRceB/biRyMYrON+k1HY7JbTbxHTI55CJxZaaa8lES3HRvT2EOU5ui9eyOGAbmk5wyA1GIW3scmHI8zvDLXo7kjGIWBcfSeCVy33qCHficMXgbYGVMYeTWm6qpwYZn0EGRRGlM1tnG4oiwbzluZz9rM04oJpeouf1fySNGow
      [... remainder of base64-encoded PNG image data omitted ...]
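The omitted output above is a rendered matplotlib figure from the notebook. As a point of reference only, the sketch below shows one way such a figure could be reproduced from the objects defined in the earlier cells; `X` and `activations` are the notebook's variables, the channel index is arbitrary, and this is not the notebook's actual plotting cell.

import matplotlib.pyplot as plt

# `X` is the normalized input batch [16, 3, 224, 224] and `activations` the
# VOneBlock output [16, 512, 56, 56] computed in the cells above.
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
axes[0].imshow(X[0].permute(1, 2, 0).numpy() * 0.5 + 0.5)          # undo mean/std = 0.5 normalization
axes[0].set_title('input image')
axes[1].imshow(activations[0, 0].detach().numpy(), cmap='gray')    # one arbitrary V1 channel
axes[1].set_title('VOneBlock channel 0')
plt.tight_layout()
plt.show()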
n8LydV7t2/hJ5coZ773SF3HXCi/BDFxf9vLvg9i+6mMuAcuHrb8Ucyevpa8A+OxRT1vjPfLhfeAZ5MDMAM95OnmUWj1Fh/VvqZSTAb8co9a2ZfSgjA6q6qlSpUqVKg3SsESRsuHh/4IXKp/pTJQKdL7t8MPBEQDe3glxorY2NwEAk2aMgzaY0Tq+cdygpZs7Pnqg9QyAJ91KgFetGelxUNo0jkVeVpsYL2KwUaoiI89C1xoTzR0zRNA7o8aNcylrkFqrjcqcgtHllIPe/aJc5TRsEquv9+vIBQ8tUQx6GycqrfTaZ40/TVVK4XikXLbJwPfQQKFcVNFE01q+1Jd2Kj0qZLLjw39RVaKoVKlSpUqDNOxwR/uIswr2kmQSEStg/JL3v4V3wsE0DYXw8B6WOPIxAQB3ZkGyGFuLzelG0nLnOixIeuia0M3OAguJsMrZhGLfREes9f/ZbqxjSEUogZ4l2TajxtkHzF3tqn24yyspxYvzndI1mlS6SP2pzNJrR01r6TQVRz+EUQw62SlJISLWLp4T8DtKDTHj6GdTkoj4b/hlrYW3mWR41G0WJM5SFOBIPLbmaBjVDMAf7OMjQquj2T6gjqygdTAK4MHgFCs2ikAGBh11piSC5GB+0zRiA99KfmojHw1nyZtMqHnn4WYUV2oUwplvTiaY0r0z1yZ1AYBjKyZlhRLjKRWeQaQ4IwC3kMR+0rYg6sPjb9HFK078J1KRPvyZfzwmNXvh1tb5gNQm8iDUUEtpSJWkfhcx78K13mLmBwDuISeBR4nWjCl03z/sfIF5VFa+SmtS/hEVMkg+YKqqp0qVKlWqNEiDEkXk0G2wQwdUNCRFmV19pxKtCLBrvJjRmjGHIA/lJsZgg9RRrG5ysxanT5wAAExd6Obt/V00xCUdsEk+m65aK92w4gCizXnZnjZqQLwkMFKgH/soiM8EoppkxNU6YdokBDnXgZiUJz66F71KAtgWuE3+W3KBSxnblyiGJKdlQHkh7s0QmJ2fc85LIh15lM4NSBSmyNG6PB0tTF/K4KqUh/aq1JLLzoWoX/fGlQUQXkufaRulUOhe33uXNJgSeKDaEhTq1fvp5xBPdLb9uoqe4SZevw90mCQ+655fZsJbinKwrP2ShFhMpDRAy5+N16+4Vtzt/EkUHWu336cqUVSqVKlSpUEaNo9lztt4ybcQYy0pULOwXeUerJqDEemEyrQWWDBnwlFZPXBr9zYAiBSxMRqhpfqsC3sc4xbtooMdkXe34CNdVNM2qj/cVrZNGngBpROJIjeBtVoyYOBSHjMmUJJ6zcPBFQ5JuWdxem45YD3Il2h8QUkn/eC/vscBP3KadYNh1jnawj6I3lBbR1DFUCypzzAdZb6S/9hpWPXERioqZEAp3Y6Qtq/PZB5joiTVEJotfgnGis8EL+gOFgekhhrTtYXrRO3RUMFtCgNiRhvYX8wBAC3piiajsazxvME4eDiTblSi/jBenk82DBefma27Gh+BZQa1jd432MKJx8XZGHJcD8hDoP5iUBBrS6onCd7nigvKMtWTcX2VFpT6SjYHb3ptWVZ3PSL2T8mGL/ySDhB5f9UwqiNr+lGU742H1aqnz9rm8aj09976ka4pj8L0r6qnSpUqVao0SIMSxYg9opUpp/MZy7iMBE+m3dHFuExjAXuJg2w8PCUPit7YMSggSwWdB2xHqh4CmDsKbT4Zj3CSYkgddEGy2F0ciG9EDOxnMLIj6iJD3S24tz0LVAMJgGjF38IItxvzgEdWLdZh5ZLzusJHR61S8q4ugtn87F5dd+l95fr7EksxGJ6q1/jYVqU+rWIwYzDD9Qbw50n1NGSO/NlTQYn+Hg97xagSRaVKlSpVGqRhh7sFhfce2chF8iZXci7S+nqvfiOAvcpqNT06gLBpOXbGC4jtfOD4GxhMKOlRR9LACGwue4DFQSi3uRVCm29sbWKPvL9nhF90nQdHamoNOfIpD22xomVzV2MkhlUCXLtUynAKzI6BohI4O16nZ/e9gcCh6W55JJEQENNqxnNUJhE34rHI24glX6ZfVWbA2sxVwUJ3/QRCIi4O2Av7iD/JJSx5liWDakzZxPahcKyZM13JoERjJr3osQqZLydEKhitLCnyIOiwTZVMYO//e+rP71W0rI+6jlBGosut0b4pnCs1vl4fgSpRVKpUqVKlFTQsUczIIW5jgpa4+m5Mewtx3M56kSQkoY6Jpo7ikNbFqKqLDAgwxopemi2tRsbIb+bujQU6G6LMzkah/JywDQsLS4DE/n6QHjZGGzi5GZz2jtltAMDezh7cgnCQSbCY2mF8pJuLFMO0gIvbKT279YCEv6KHWoyIK3eQ+EUMgTivslaISr6JWAbjHA2PKXqSDRATF2nHO5OJI9ySN5DB9wpH8Sw90NF5JxKFi6BDKO6NepbYDo8zJyIyLpo1O5XaVPrFsa+Uox4nXPKOPSe9ShrEz8l/xvEzfbvaxGQ7N3nmuixPJn3FmPhY8WSqGkbK9cXmGX+yEYdDyrUbOBkroxNt2bzR5VSKPBykQJUECiThm2ycRdJSAiG/Ym3tpsRd/doASF+X59ZYLpX0cc0+ExuknVTeNgVpIPk7b0o7Nso7MIMSxHBoFdXftV6VwvTYYk8ntOnVH8ja9fj0cFvEO9M2dX95HupGlmPK6zgFMg1uFAsyY13s72NEPgqycPHReVEDcaY4E58geQSfLWqlTsuGkRTrl+dAhYm6hBujlzVrZxgfhEc8uX2S+r+N/d1ZUte0CwEJN8xUQqF7qt9ahwNZXKkfIy8LLX95DFx7ExdhNLyxWHTSN34infKIn4XNavuLw2HF5XsRruOwK2WNUh95cW2nYj4uBrmZbAJ+S11LgNZMfZVeWOOJinqsowMBU/PR+F4eRDDHQ1E2Z0rZ4eiv9HpJF6dVdz9HoDfT8rG5S8p1zPdAqepp2bkH8y6q6qlSpUqVKg3ScM7saYjkulgsMJ9TdFcEdc3YhOOosViIY1TMViQqEHFWizxkVBVQO05xrkno70yN4SJol3NxxmQFAdhmhJ15SMW6T/3fmmxgcjxIEMcnAfTe3wlSxPxgBjaVXbigvlqMOrQmqLtarcrJHK+MoPBd5OhUXCDS3EUjAA9IECnLqhzuvpX0rJqKMWVyNZ6N/fKarZdjes54H6W4nFN0XjrFUoTrnFJHeTn0TFtZ06Md7pxSz2USpC94cMcHB4YFCr9uwXuiFNxPZeXw8xGVLNRvDWqXONZ8+PScy+efPne/aVU7PZXZIetapaoq1VGS9kUzMtCRofD8eZ/ycg9LqqsSRaVKlSpVGqRhMHvMuIRHR6anjhzc2IFsNG4wpnItAZPeeAEJtdlelCjunYzo85kLsLFeZu59BzMOZzkm1I7r0CyCyewxKn7m2GOh/GSB/f0QX6prgsS07w9ieBFH6VrhYSyBsQzqS+uNJGjSTKegMx1fUwgjmxUzN7IE7Mw5kVWmpQrL1JUk51yJM3Gqney6Ue0nJtAsVfac6+I1jWn06lXnDs8sPUwW/tHV15e02JEjPTx3+llwWCuhoHm
vw/Nm0l8YkHtrO8H0hsutQ+sA7g/qnQxuFHNKGGQtsEG+Cc0sLJbdPKhm0DWYmCkASPjw1nfR+kBN19xOX4Np8fnTgeDryC+LWB3tiyXUtyxITiwLPKl5Ogd4UiHdPrgZzu2FTeSx0+dx9uwTAICZC5tJszXFxSuXAAB3Znvh+bpO2uhIVXVg2bvbx6RGyvrJNqnwpiery8Kd98sFgNz2TXqEUvv4MAhF2/o1SKuKSjGc+J1GKykNVKeLvfdQcaJiXb1sdn5dP+KhjpvVu+c9UJI8ytr+uUd4IV1vU4iRCXpqppX33l9aezNLolih9ztUJv9lKiLRxR2qb6UMguvQatUTl+ufe9BUVU+VKlWqVGmQhsOMMxCNCFRPKVprQztx13WYz4OU0ZhQ3Wg8itmB6D4HRG6TOOeyGTFzPq7vI+A9ok0+1U4cZIIvqyRCHThSLVVlgRahv5Z8H/YRJIXLty9hjuB38dSTTwMAJhubokZzNFwfvH8J+ztkYmvYtDXUMUcrz8wcQ9M0sd80pl3nRdphQcGVbJ21/ohVVE08x5JSlKwUsJZx7UG8jmBmGA4j2HRkxuO4s7STJuzJI78mrYZzkh87clwi+ykzTFvg6DKNHGDLJqg9jswMJHhZAlb2PGPV79zOfXk6U643qkHze6IPRLxX199TK67JOg7FNEpGtldvlOK1tDEEtK42t02pl4hKSV+6nXVMwA+rYtH1lvoxVG9JVVfqT2ms5N0OSP+r+h1/c1sYPPcgqEoUlSpVqlRpkFbko4jcp3hVcz4IirlkbIOOsIx2FnCLsR+jGXOmINrVOwc7YjNQ5qpDEWsVt6LajMwPiwrDD+MLBTJIIznXkUSxsKHfxne4eudjAMDexTsAgHPnnsDOXrj+1FMvAAD+7u/8Lt59610AwOuv/zg8+yJIKWM7wrwlc1rxnjXo6LnG4wk9s0PLwHYPmOpzpF4/Q8YJpg/MdapIuLGyiBMVuCCNhwCEUbBJa8diVfQ+1easpciz3Ho/30XsZ1oH3fHwVOGrqcjKrc/eBWfm5XjBgwOM+9x9qf0HgUuUHSzvjmyB/e6nki2nQn2Up93DpmGrJ9FJQL4FDr/B6qOA9JNHNhVfzA4w6khFNQ5N2GaEtiVAmWKEGwaavemD2TYFcbgjcf3JQe/lImJWhRBvLAsKDoiuRdOFhfHmnQBmL7oWu5Sg+8q1qwCAt376Jr7+C98EAPz27/w2AODt998GAPzsrZ/h+s1PAQAb0zAu87aVfrO3u3OAtWxVZuWcHgL9fGXficIDyyZS2jb1+rx8w0jcEnqRL7wCrNVG59KXlbzPfPPwqo0c6FYPpjfJEvVUFgNr7FEvAqX8ymveKb9KC/TPo/fzg6RhSyH0riXnxXJvuNw6tG6Y88/Ke66qp0qVKlWqNEjDqqcupgwVfwFWH5kIaDZk+jliCaE1aA/IjHYRuPXRdILRmH0PQv0d7+Auspg2Wrsigtj9vokxJZuiphGS4lF2c/SuOkKpx01QB02bBm0bQOmOfCb25/twI3p2kjxutjfwr//8XwIAtqfBG+PLr30DAPCf/K3fwocfXgYAXL56BQBw6ePL2NkN9XKY9NEYWCxY98Ydo2dxvicneXgFbEdJT560p68xKKWtFe9rdeR4eRK4TiQFfV96WxiQ2F/xo8jFIqVmEu7JxfqkPKCCAsrj9frxsCmXcoKK9DDShZdyD5vDHJK2mR5kf0pB+Y6Siqo23f7S/iyv627bXaeeR0naqBJFpUqVKlUapGHzWOYwLURX3WXRUjV/ZTmxT2MxItCWo7F2iwUaS3GiJMpsuK/1XXTKEtM+U5Yklmyyyno0dbZJ+omEbWjAElC8NKK+LQigH00aiY4L8vI+WMzQTCmkuQ+g97//sz8Mddoxnn76WQDAs88+DwD4wmuv4uIHHwEA3n4vgOA7e3swJKm0hFtw9NiymSz/hyThkZQ0/Eym95wSigg+Rq8VCSHiCyxZuI4BbCccP1/T0WPTNKZZW05fY/GhUJ5v817ulXDqvVEo01oYxRFwZ2WzyoKoOkDhMVl6vnfz2KMkHf/ps053l7hoGO9cjwpzUTCPvI/30MwDpipRVKpUqVKlQVrhcMc/VIIcUUFLYgWRAlrmag1AQWbhCdMw3qHtgt5/Og/SxgZx752x5KgWjQslUZIAACAASURBVGw6E0NhcFypxiOGjzCc64F7acXRjpMP2cikxqPV+n/mnIUNxoRjOHmySHI2OlJ1Gkch01oOc3K8of57vHP1ZwCAdy6/BQA4tnUCT5x/CgDwlc9/AQCwvbGNt94KllLXbwQrqRt7FCIE0SKsZadH78D7eufiOTZD5mizkjOjU9JDEtaAZUD+SyeNYmkkogMxmG90oJOX5NWcYEvgjt43/R0kEpPUrx27pCWvUrIyVsPSoDE9bgwm4jKSQAlls8d4TR4mtp2xdZxTJTwyS89DHGYfGYsYhI3zTmx/4/UEn1nD6Uy3yYmfYmn1YgrOdZKd18UvwhUs1XLJRqQe75N31etRod8l3bx+ZibLWopDYhOrxqp3VWkrIsQZ2X1ZW3TK3jXgpwRaE4He9c71OxhNzSPmqurNnHTLVfj4XImTH42pOMoCQ3bnq8ZyeKPw/QHrTxJlmy9dVGARP4SN9q6OTUTJXBajBqNJqvJxrYse1uSJ7BA9HqUtqt/pqE+CmaYQd+EJk2PwMKYNQqmsLKuo9DqaLWquIfWRd7C8eJNqabbYwZtvvw4AeO/iewCAF595Cc888yQA4Gtf/xoA4Mr1EHvq0uXL+ORG+H17N6i2nI9Tb8E5xEfj+O5b8oAnwN34uLjKAuxijCqvNiBPCy1730sMJ2VkoN9xNK21ci0mLkpHtv9HSkUTUf57+W15LWuXXF1V/PByMoWWwmZ1+B5Xunc6tJqupOtZEVApX1OSIKdH8brv0WJDz777SVX1VKlSpUqVBmltiSLmJMpEpMByh/Jc1Ecx3yoxXMQySS1KZVwHSyqL6Ygj0Jrogcb4kNo6OySXCMxO91ZjoomoVSVjaHIG3+loYt7tMXueGyPgasxBbEUtVvKmFr0L/2ktNo4FdVtHcbHevfQW3nrvpwCA7c3jAIALF4J66rVXv4Bjx0Pq1ps3Q9jzH73+E1x8/30AwAb1zS+cqJwsnVvQyDiruHyJEWVifmxORGQ7Ud1ESUKlmc1xaO+jekvORTGZpRJR73koR6ao1hAwXY/TodQv94eGnOCAMgPYz+P92ZAs7tZs82HQYb3Hj7r/snoUJIsjqV+9i8OYWz8oE9oqUVSqVKlSpUEalCgcgbfGKlWeSbl8GCO6amjmWqK7OnWS62CgTPFn5Hw2pgi0Y2vhGBsgdrazPiYIUhwuAHQJWBmBbivF6Vk8AAk/kj2vUYl3GBPuHJrpKHkWA49GHpk4aTYNNkZAJQGjnIfzQZIwFNLEuVb6tNcGqeG9DwKY/dY7b2J7O0SxffXzXwIA/Nbf/A3cuRPSuv7oRyG+1JWPP8JsHkKNHMwofwZhPXZk0VEiqbZtaQycwlkidy/5MwqYjehmHT9nfB
88fAE/SSUVFkFsIQ4UlEOhBoztQ5QoIp6jpGExXzXxwD+z9x/+6JlO4FGWLooOdw8j1lPvR++PYlibSH1jh7vqt7zm1e9M48K+NAXWeO3rhvl4FGg41pPypFUrbq8MJwJK7stUC85FiNtw7mn+21iMeFElj27bNBiN2fInts17UsydDTnBdhku+VBZ9WSlPE8EWa4UYpWDvZ33GBU+Hr5lzNZJbNEDCNAtL72xvcnnHTCiWFAd+S3YNrQ5nYzRIgQi/N73vxOO/+GvcOH84wCAL7z6GgDgG1//Gm5cvw4A+NEbASy/eDkkWUJnMCJ1lKiPXCsLulhCeQNLAf84SKPkI/ImD+GUfLGJJ61Okq0b1bZAek48cCPyOBciFcxVgg88nUrCBACwxfhT/XNqXAoe84/y5vFwqDQe+Tk976hEstDyuQczr5LeHXGTdx9H7P5RVT1VqlSpUqVBMkMi2oUvnRYdg3BNKmosH02myrHWKtEc8iPaJLONb6h+5A2mdEPDkoXvwPoduxGcMrrGYGEZhKV+E9PcNEBDPhANmdNa40SdwebxjVXxpKQ/4cTEW0xbiltFe+jubIbpdkgDe0DqI2+Dqal+ZudY3aQGkJ/FGEl+JLbOChQWB/iOQ7OPJA4VOpYKGsH2W1LTbW1s4qUXXwEAvPTSy6GYC3VcunQZF9+/CAC49um1cF+7ALM/DGB7H0OJzxdBimnJm975aGvv2fy26+DFc5uuOafSvqaqJ9NG6cxr8SRTRzUwSxMFedu3sA+e6rm4PsTa9e33jTF9UNIYGJ53Kt2pdDtOcrpmexxglJSN+IJEPwob860fEqCVa1DSsPKLiCpDlqK5rq4XpbcYZr7gdZ8kvSrcu46qpNSm9qdgXyhTuOewpCWKoaRDQ2WSeWjlw80rkZ9xXTucBLDqGX0BMV9nXMIz8RpL54rSbZ+uvXGr+BBVoqhUqVKlSoM0HD225HIo2LTaneScwhL4nEJ3luE8Dl48kCEM20gkFbGStQZj8pju2LyzjdKJoUREbO5qYASctj7l9sJv/ou4a2Pl3pGlKK+LJpqNsjmtjR3tx7rXTxjFqqbAkQjSIbGKIghu6DkdR+R1RowLxqNgausbi79+/QcAgL/8wfcBAI+fCU58v/ytX8Evfi3kzLj6Scij8frrP8Zbbwdv8L3dXRq3iJ+M2lHSn861MDzOyrw4ZrmNnKjJuMZSrKfcGzu0L9U+OCoEBRMu0sZ54URqZIlZWXUMectqUP6RzsK0PoVpfb+A7gL+uU7V92lsNbAcHzGVXtMItPFcxE4fHXzhKGiFH0U4Gv0HbwYM3hY2DIfoVS11oZ/DVm8dHX+cdKYxvre4ms5LGIhRk3poe7homcV9CjE/pAT/X/Ky5D46KReflzPVsUqiMV5USPkmkmSTVno3Cbng4zlWY3Bwwo7r8FY6pxdSVq3xtVk7w2QrqOUsWTZdvxk2hX/++/8vRk249twzzwEAXnrhRbz4/PMAgFvkn3Hx3Xdx6dKHoX0yJIjbnJENX8KXuPhhcOQT46JlkxVXDN4cICRLbPKRKbqfm4YpgdmqaTZA8Lan0hr+5j0id6NOFcvh/jzbg6BCxMAHsRgObkaJRYFPTuqc1iW13roqM5mz+VqE/mtW2tAj8bNYxyqq+lFUqlSpUqVHggbB7POvBjA7iTXFXHsCZottWjjYCHDrc3xvAxbpmWu3vR3Y+KgOGJMaqIEV4KuhOEqWjr7x8BzPjVRQGEXP5VETQW3BqjiWlGVQPYLZGxQSfb5o4VmVNeEGup63cSdxj2wcpDhYIlFYeVCrolMFagXcNuK3EH0xjPiVdKL7McIJMzjI97WLTuJVLSiX+eJggRPkn/H800HKeOWFl3D8eDj3wfsfAAC+/4O/BgBcvX4dbTunZyAJoXPyu6WkVK7r4nVWVXHgQhdHIwkIx+9RBf5rTDoezD255gjAbNP1roc0voWKObgkh8M3cS6LulAZdYiBgpzjOGFRaoxpYy1gloPZJeC3RDKN7jOYLf0rlV+Tm10FZnPCrMQ34bCcssyFewezE0MdeffUr1KdpXPNah589TP268jvWe6VXsHsSpUqVar0AGnY4U556ArHw7u/4AGRi9WhjIWbYWbLG9nto74ttiLWoIjckJW0mhQRFSOMuJFWxSMC0DQj8L6n4xLlvoDlx2TMJHIQlri+8UhhFAzydjEtqWzSzE0qr0CJc6VsgyO32fSc8CJXGTldJ1iQBSiyLTsgeji0HYPNoY8cfRdjgwWnst2gOFqTMWaL4P3945/9CADwwx//AMePhVhTr30+OPL9xt/8DQDAzVu38BNy5PvgYogzNZ+3kUvxcQw0eK3HVIWLiqRiPQmco1T9D4OEc7RmOWirldCpMSedyfiuEvOJwnh8FmgJRvHIpOuU6bYao1i7So1lZBDTUcd6etSpShSVKlWqVGmQVsR6ilYfwjdkoTw8fNxulBlrHg8IiOExWNcu3LUHrJjQcJtOGmFueW4cHHP6YjEUHsE6I12TkBtto5ig0LnOA36UWrPECLdKh6qSwHSk45+YCfihWLDhOEnWKP27qDWjlVKfAfGKcyWpSCKwRknMqP4vSIri52y7VvAV5u4bjiXVecEoOqV3lphMlqUNgxt7IXHSv/2zPwQAkdoeP38Bzz/zPADg+aefBgBc+vADvPv2ewCAfcIvvHMxvpW0paSjDL/wHmhsmvY1dX5LLVgSyyl2dINX46y52rJ1UagjfwtRgpQ3pFlFgYLYAq2gz1bX86BF3iFG7k1ChHCslPya0ikPcKzeAzl3TyeL5Y2xEqcsFfhyCaHfsHa4K2ENJbV3nminFwIru28wttIaAkt6d4zx04Ou+DX5iJd6PfYi3dJzGtNLSxyr1O8slo8SMifT6kuorjBHiyhbIr2mGgyfF1HkS9+D+l7uhlbEeuIfqgUB5YicEqZL8aD4wZwyj2XViTI/jBBvbDPP+uTg0VIIb0tgsw7cJha8YloaFyn2nIZ1soiJ6kS1Gbsdr/FGIi0lICiXi+PSDzuN/kwwBvkHVaZYxlK2QAaKrbEwlPxJFlCx1lVqAV+YkfSu2jbGf2omnJ0u3Hf5ow9x8d33AACnT50BEExsv/mL3wAAvH8xgN/v/Ow9dOTNPRqH99IueKNVTbOJsIkh0LVFaT4apfExauKX533+TulsaaiNytyY9DPbPJKjSc4MdIF4gUJJ7e3MlWX9W5lwKwc1VQXl7aw3IgP1azXo8nLBfLT0fINVl1pbfmkN9VFyRYxKdIF0dQ3fKPrlRKUb55A8/tDrMHFORF4hvtv+JlAYoEH1mF47C3OnV7y/7tyrgrCqnipVqlSp0iCtALMDJaZ0iTkec9cph249oukpcwQ6VzXvjiwpGCVasahutZgc5XFufiHmdRRC2zewHJWWyjSjKH5aBtIdoqkg77ZsYmtUICg2Z22A8TiVXpx1icoEQGLaKSZ0gu5bND3uNKrehNgL2yupS8R9gxxBc95IqHZxGFMpXPsqnMiZlJx52BN5QWav1gCjSXj2G5/eCMfr1zEdBxXcM
089CwD45je+gXfeeoeuh3KiidOsjJgwIs4PxHfhc+5HVIP3Tg/WOUxxmIcEVNcBh3XO8cPee9dklPJMP9N9bFJT0QyUr6Ul6aIpSJH6WiZ5arBeVFR9h7vkvgEv7aM0zBiUpgrf8f2gKlFUqlSpUqVBWj8V6pLdylulLxU9mhP2UHAIpxjsjKu1QAT9FBcpm6XTJ+kUO32xKtw6NJr7BoAOMGlQDRiYJC0qADB84QzQcb8FkLAw5HgVQSgbnhEKGyh5JQp3348wWnK40+ax3JRTHEorGCjr95UZcoajwETgWlK/+n6UV6Pa5WdoKI9Ft+gkSm5DKWq7eYu9vX0AwBs/eQMAcGzjGJ5/7nkAwKljIYXr+5S2dX6w6EmXzvskPSuTyD93zY1ppCOtpGTKedgoqIfrB30WOddp+n0r1rDK4W4FdgCkz9R7PmsBerdDLcncCDbyybkHaRpbCvuSCKvrVBJFoni3gBUq8ZlT70rWr6y8BjCKoTbW6dCK7nJTa1Y2iIfdI60ICqhUTnmDCRqUfYDOI1uLQ3hyiaed1qkXRq/UR1Es7PdJEgspcEd64frlk6jkHDOJ+8Mprm18lCScNAcn1J6lcpkXaP2S0ocPHp7ZNW96L1bqUGC5/igaUcuxv4iPKi/S9XC/nPfJBsHH6O/AocLjuSxlNjoX42exv4Z3XlZytlza39/H6z8MWfceO38BAPDFV18FALz//oe4/umn1Cey2hpZGcv7rxCKdL8XuKNQPR2unfXO3Uvbyb0FP4qjWoiOihi3dtA+EKkKVptxlgIA6tHqjaGsWeX5dLQMx4P8Ooapqp4qVapUqdIgrQ9my26bIo7eQOI6xR05RuCMmhMf4z/xrt9FE0rR3DB3C5UQSe3+Yp+fgaHaTI8FBO8dLF8nVrtrvfg8BG9uxTU4q/Vj0jf2jnZaDJfB4UPkkCXNqJYiChx0NJM26TUT63CxAVVfONco3wOWgCyir4XPpAxjlCpBDBBMBJ7zfNe2kXhO0SPfisEB5+JufJSnrly+AgC4+ektAMCpk6fw/LMB9L78cbi2O9sVVZZXUqZE4s0MBYrRPpUBRG/81L0yYonJoAYk+VxSVfo7mbZZPxLNa/9aTl5JvutKAcvj+axHJSOGw8ZpChYWd9ePo5bghuqL81oB0QXDa3mbPpbqAdEFrUbiqS2GOnHy5KORzrv0HeQ9yq/5mKtBvb/SsyyrTdO9zacqUVSqVKlSpUFa0+FO7coZ92Q8oke2gEFOpQccqJ9NVlVBHS8qd600UT3ec2zxnZHoscyFO+/ZN1q4Zudc3D05ZapKjdqLDmmMeDhrt1mjop5SY9kgqCr0/8L92sK5/J7IxHkf4yPFGEvx+fj9SCBfRM92o0eYK3SxDpHYVCIiLmrYgYm5G2WaGb2xdcfDub39AHjv7u7h5KlTAIBnn3kGAHD95g18cuMa1QHpDzsUWoq86TqWDYdpPd5a65H1mSFuN+O+V4CWvYirBS4u8rLL73uUqJRsqnR91bmjpBy/W1Yomuan9wWPu0KlAxVGn72jx5yW04DklMMvS8uV5uzhsboVG0UUO6U58UGIKh+fxR0wVt0rgQCjY0Quzjk4AXLFJ8MAxqQhK7zx6QaFuLA7qIgLsunEBEMcIcR6g5b/mJNFDwlW1vq40uow0jKeLl6LMZyzh1ILv3pJVjaWuCux1ZOU66Lbf0ykQ6U1OK1A6rgf8zUaM+/knJVN3kVA3ilwmjcNsYhSx65QXvwu4gaefoVxQ7eNxd5eCET43sWQIOn5F5+X93zt06vUtoPpWD1IQSDZIuoIPsrih114Z6t2nf7HBjUHCuquu+z7qgV46GO/HwD6Knro/cgW8tI1Yb6U1jIa2GjfitjvqO2N6x23kwcM1PrhFCTPNtulzGRKVqn0OSx7vraUbA2S6hPNV79v61JVPVWqVKlSpUFaC8wGCmK1VztrBnCHQFjJKai8RSrXilyNuWYVuCkCjcq/HXPSpmyC7kcngK4RP4QolRhhd1mzYRb0ozHhH+Ku3MAGaSh5PhVIUHwa4k5vhdNQUkl8BKGcidVxXDTIxmWUEIneT+lbLM9JnkpxgETqgiZ+TrKv77ySLqiI08Hher2JEgsdO++i1LcIqVbfePNNPPnk4wCAC+fOAwCuX78uYzOmOF4xkKNfV780QMNidmJ4IPo7l5QJweQGgMhSq0fAVa+j3hkqs+zaUfD5j1pu6ERFlKmu0/mavZeSSf+gBBdjtelvKo9Ppw0J4rK37pxItTLJ75JUUuq31jncg+qpShSVKlWqVGmQ1vLMLsV6KiUIERMyZ3q7s4OHBQOjKc5hofAFxo2NTRznuC7hwjkYLF1yCmSVCOHOo+VzhFiPrBUnPcMiBZVvRgZjMq71RpBxcWpjL3AbOgh1QN8wTl0zegx9co0GhB9PykhxxgYQsRKrHjMbZsV9lLkFr7EG+juGAadjx5hFLA9OkOQQ07Qizg/pLmMT4tEapIpQRcAeptMJPvwweG4/8fhjAIAL587hk08Y4M7kHNvn1otPZ/o/czxsaXn9sgYYvZ5kbSDvr9cnD3nRiTPeGkx48s0NmDM+bGzgYYDZQ1SKJiEh+5VKQ5QEeqx84Vxer0KRc3NaHU3XqTrWwShKFOuIeMW90BBGseqdVYmiUqVKlSoN0toYRR7xNV5Aj5Xy8Ooc6/JNxBpkh1dAhjDCIoIo9btC+oUTyLrh1AlmudvITbDJbAcvgIVNGQIcdB2sDVzvhJzxnDcY0TCZjp3EAEfluJe841pTzlWRxMwBYNBJaA2NUiC7Vx9Fq2xi/8UsNteXlhhXrwZambvmYFByX85oePVuldK3pzPno424STOhVK5dKyawVz8OVk9nz5zB0088BQC4cuVy6AfPExiYJs0z0MHJe+5Iimpg4oRmk22JBabDqKg31DvnZQT68blSpEiGI5tH90T5a18mgJTeS7ylX6bHMS7hIA/zEI+wWW9CPSxNWzL2ZM/kPnkNxcv0zRVAKnFBVQtmH8stNVt4j4pKK0W/UPkl5pqfw9BaG0WIVUQfT0EG6WW0Qmlw1EfW046oF8cfeLLcxgUymqexHkgNLKtmRD2m2m9IdeIdxCzVkmc2qZk67zBrA+A6omsbY4PJZAMAsCBz2q5bwHHyumwWGmvV5OB+ODSGNhv1oLmTZWLsKsPG9WsRljeKaJosYc+lBvUOtElspiYx8DEWlEz8qIqKsaHiMdOiFZ124wdm5F1JMMjkQw306Y2b2JxuAgCepA3jo4/DhuFGTlZ8CUOPqDJrdOwuz1kTKUmSuOkDJjqZELniqir3anWU3Nj/yoY+2nJ47EN8qZ7bHbqejn9PxZD0IzIvceEYWJySpD9pHR4xi6J0x/e96IumooldJ4rle+VQHs+kiqGhFe1RVDNFo45+f3x6E3cglst2CKMAY+v7S6uopUida63pzZ3+mfL8khVm1VRK5vfymbpKG1ZVT5UqVapUaZDWVj3dLZW4iRw0AhA9gOlv5+KGbRRH6rK9zYgIYiJHrBhAiVHURtVZZBRpZ2+iRzADrrN5
yAd9YOc4PjmW9BEdepixmNA6q/btyHll/An6f8SOezU2JQZKGxk4lQ877Y8yY9VllEd2uNbPhczOfs759F7pY1o+eYSCGF4ivnVMIc0ba3HpoyBBvPbaFwAAx0+cAABc3r+KCdU3IQlhCgO74LZCH1s7QsfSIR1Z8rNo+y9N90ekKRvfg7BrJGFY2+Nw6QKVPzpVjG4nf8erQOp1TWdzKYMm3so+lQDgzxItk3qKBgKZU13JwS2RRtcBjI3+mxej+E0VY0E9QAOFElWJolKlSpUqDdJ9kShWhRoQZzI2s7S+xxFDp0LlzEI6NIhLWVdjvOLIo25WYjyJFOEl9ARf9GQSOzKNcLigcBJ7B3O0W5wxiHWRFg1xmR1z6Ak6kGExRS7Ax7hSuYRVEDsSzJmuOGXamhVPHeP0Ra7DRQ4mH9N4n6pbXkZJAjEaKip2Jyd+9AU54WE0RkOS3cWLFwEAL7z8EgBg57076OYzAMCEkys5jxGNc0vc2HQywpzwir2Oot5SO1P4aLzAY2CiOWOJkY6JpZQOesnzHIYGo5+uwTGWuN9V31xPeijXvLQ/JlTS7+NnUKIA+u8gTWxVgozzMfXqu9bjzPX12ylJG/0Yeg9feijRfdsohkQwJ+ApTUKn4kVxHdoXA7o8leM4KKJ5UnCUspuOaqZwnzMengyW+CU5RH8KSyD2mBacUTPBxnQLANDOw6LWtgeykVgjYQflWXKw0nqDHPAv05qqJ8eqISebHttc89hqNZMA2EU1k083De4AwoLq83o7L34WsnE6/YGsObmp3Hga8m+7tpNESLuUQe/TT0LCoxfPvIiLHwS/C086qIOmxcyG92bHpDocA7Chn2Oa2QvylXHK87YfqhlIjO19ukFoX4jSW1zv3a5HQz4TJXXUOnXci+opbhRRTfIw/SSOgg6jeloeDLDk6Xy0qifN6A2wFivVvEdBVfVUqVKlSpUG6a5jPd0LRUvL8MNaiLdv9HTWag+lohHVCl+indtGPw3hAlQeoiQUOksXxBnbJnLyLaksHPlRjEdjbGwGiWJGUVB39hYgwUO4U8u5s105SUyZ086lBq1m8oUyd8k6KDPJHsdY6E9MFCWnYopJ3ytOP5SKQl1a1mN+vgVFih3ZRhzx2bjg2vXrAIAXfuFlfHT1BgBgvwkSwszO4DksOUf6bVtM6D1MN4JJs+tm1BEHTzGsNFco6Vl7PhOF3nv0zBEHrTGVBHJYNcK63rND3+XhVU8DdRSkDe2J/PNAxfGlo8nKhPeyXHoo1flZVj1ViaJSpUqVKg3S+qlQhZbrKUscDO+KOmFQH4CLuRmE74lWivBKdy9pIASIjvpy9t4V7tZHFlDMZG3ENYRbb8OxQwvTEEhN1+Zth8kkOIJtbgUz2cs3LmMyCvWO6BgFFxfjsigm1fV0nLF9yevAz+tioncB3qHSwA6MvVP4Re7EGIBr6pKW6vhdSZsxHlRuHmtUeemGU1B+5oGuBaHkyekP9mB13sUBoMFcdMFE+ebeVZx7+iQA4CcX3wMATE9so6X4U8aFaLO2aWCPHQcAzKdBohifDHXOrr6PqQv1tT5gTdYYSYcrTofGROwlSazFkh79odL05nGAShCIpsOmMV31e+jcWm0WLvUwECVR6O/3biWK1dI2t1vGTIA+l7+8isONlXbMM5mMXOL215UkhsoXu7EGRhGqWl7fUWmCHpgfRf4bUI/nkSQQ4XM+u8/osB6ZpsB7qBDlRgrlYUPg+17gvuVNJ4TsAICOPLkXXQc7DgvRxlZQQVnbYDY/AADw3nRsGjaYprG95E6w8RySfpfUS/xN5tdMH7h2Tqyu5MPWG0FeRwHMTmiNj+eoiTdQi6gntBIsMqiKrn/0Hl76/IsAgI8+CvcZ57E3C8+8dWw7nNw8jc/96q8AALrHg3f3T9/+CQCg3buFhrLqWQLQfddi0YY2xiOZIDDkzh01a7yBGTlp/fIFSyIJWL3MqAH8jIPBTJ9VP4rVpAHr1cpUrxiLQTVUyf/I9DeiR0nlxFRVT5UqVapUaZDui0SxShTrX+xz0Dq+VFTbqIQ+Fkn5UC695H0Es1OnXKpPuNnYb5YCOlJtzRcL3N7ZDQXI1NLbRjzE24OgzpiyqSYaiMUs1+sB8XYWi8sYpC4+O6t8Sqqn0ONVJEYA2vxRKuufM0q95DJpw3uvxi2ey6z87ompVDEgo+pGxiiM8c6NPUzbKQBgSmqmO3cOsL0V1FHHtk4DAD7aa/H97/w1AOBX/sevAQC++uITAIDvf/Qmulu3AABdG95ZYyzMmIN2qaBQQnyOI1FakVYlnLqWfJX/TrgrJuTy+j0+egzjUirlc0/oMyhRLFuLkvS2sXS4JupvVsWqxEU9T/54X1K/1BHv6/tz9PtXUj2lfe2Xj20eDVWJolKlSpUqDdIDwyiSXTKLy+y9ArqJibOKKzfs8Rq0zwAAIABJREFUwW0gILbJ6ihJFsEXJQUmDWICpehAzZhC7EBHXnmtb3Hz5k0AwJiA7tY5WOJEbRM43NlBwCycc5hOGFy1dM5HkYbb1GGvo5JbDrkJMeDhHBsGkNSjHe4UiA0EcFh+d8wFuSSlKZfvSR5a+upJGaqcMomVx+MqCqxMItSJGp/Mig0gEhUbNtggRbTNJnaakDJ19FS4dnxssU3jfO54iAk1++BDuDaYML/+e/83AOAL3wqSxeTOAQ7GmzQe3DkHULRgx/PDeFiJTZ7pj42NHWfuMLCAdC9HrqXaFSB8mCQxefmHQbmJ98+Tw90yyj2zi054iRYgN5Tpv+NSYjddpjySy3GOfv2Fiu8DVYmiUqVKlSoN0hFLFEpvluvekjAWJQyDjnLCxPqkjFH5MPLtGUqUYNMUoxj5aE0VFcfRGkgukSTD8YMWrsWt20G3ffr4Nj0LYt4fDiVCJ+btXCSJCXnlGXVdAyhxNBS3jshRZyMidcScQyr8hoThYOmhi3hFojotSXj54DMZZZq8ApHoSXb9tk3yI9V9W8TYV3yOI4W0ncFsO+AQX/3bvw0A+PjTa9h/520AwLs/DbjEK88+jhnFjnrtcy8DAP7kn/0RAODk6bO4MqUQIbt3Qpt39rHZkvWaCziUsQt0HPuLcwpISI+RWMXxizRGYWNKQgYAOCeOfA09VWfi3Ernd44FDOu7+9/QXXD5eRWm8If+VESYUrr2bFocmawxUNEQ/+wHr65opzTFl0gDaUFteplXpm9UaxD93X8FBiox9EDHVRdKOMfqO5XFVb+OnA69UQxW5+Nk972FIC46LkWWY3A8Vd77COimQchSdDqKxgbpJ4d0EMXG3fbePQPTnbHwpPOyHYHV8JjPQuyhdhqGa2JH2Jtzbm0KUkfmla3vsEsAd0t1bI2n4IVFFhHfSVyi6AvC46Js2MUU1ifjy8+Xr8/eUb982wPbwgTijYSr9yo+FJ+LC5iXRSEyAWqeS/3xjabvVsdVEjcJE9+VbGwwYhbL6j/21bYjYPtEUBtNnnkeAPDcF7+CP3vnUih+MqiecMLD3Aiqp3/3+/8KALCYB3+K63davPY//NcAgI0
zZwEAV77zQ1z70z8LbcyvAAAO8Kl4fJsFxfty4b23FuhoN+gc9xFoyHrByljR/UlQfLVB0zg4sWawiPMjXaAtAGu6fh09ZcDyL7MU28p7vU4olYnck5Z3uo4mzoW4QajvLPPBKC13ojpRjRZN6YfMTQvqnaGNYnCt1+VKTJTUwe/HF+7VADe/TwU8a504QEnOshqMAb9bZ2VwyxErgGhvgexb4+4X9quYWbQw3kuoqp4qVapUqdIg3R8wOwFpNCfKu34mUahbS4lvJJ6T2rFFzm+83NeLuOo1hxR+WVUxRy/n7dohmo8yl+08MBcJIWzf0/EG7twJUoYZ8e7P4PYI3SLcezAPgHiDBmP2ANahbYXzCm12WMijR/NYKuGiZ3anHO867i/4GD2po8rO98+pa30Mu/9eRGo2fYHZa7Ple6JU+mPhcTaZ4/xLQQqY7wbDgo2nnsb53/xNAMDrfxTey/d/8mOcaHcAALu3Qy7ujgawm2/j7f/z3wAAXviVrwIAnnryAmanz4VGdkPbB+0YB/NQx4i4ws1jATT3ix2MeYJ2LFFYWA45TxKkJ9VjNx5hRvNI8nqbmKpX8/Em5+iOTIdz9FSMspoWSK8rDrYUGTVyy+raEeKzRzmUOvZVGWwelnzy8+uc4/aS+hMpUUuawyD43VKVKCpVqlSp0iCtJVGkpn3rVVyCcvJrsdJ+mZJpoYfv6dein5QRrlb6aCGJi8RnynsJv8AAMOsCOw+BBzrh8oEDctA6IMlia/MYxk3QfS8WQWpYUP2NMWgk+VGoY382gydTTr7WoBE8RDh4O6e/nXQ4gskWrZi08rETXbljbMLFtJwx5EdfYirpLqMk0QcrBedQHmaJ9JfhPutyhAl+Qn1jXT+bMZ/GBC+QGfJfvf6XAIA77QLPfPkXAQAHCGa0V95f4MbFv0r7Ng85LcbzfezuBvziuz8NuITfGGHjxCkAwEtf+AoA4PYlj6kJ9R07Edp8ajMc291NkTRv3gqSzcHBAo4kCB5TjkGlHTP5vsZCJqUke/JeYk35OOAyRvI+5DtA8Zs5LK1nfmmWljXG9PFZLWXkMY7Qz+EQ7u2jyNE5rXep3N+sH8WUI1J3/z59T9GUP3ZMDoPRZg8Zz0mq13jLkAOfjkdm+uPXb2dVP4av37fERb12lRpoSPV0iEb6f+YLmIPCvpUKjCeETK7om5FbFnUAZmQZs7MbQlYfP3YaZ08GlcXlTz8BACyaqAIyY7WoAoDrxBpn7EOcoQkmsDT8rFrT9tb8s5N+WzVq/BG5CE6b1PPbqVHVMWbib9o81Lnii8j8P5LQyLGB8pe5BpU/KM4/HjbSs8eewPEuLNZv/vEfAADai+/iaerHF74dVFCb1xb40W6wXmo/Chv4NgGvz7/8NJ57/jkAwB999zsAgOv7t3GwCBZtTzz1DADg1S99GRfffQMAcGIU6jh7ENSMn3QjzCiM+YmN4BW+u7+P/fk+9Zd8cCgbn/UeEx5vYnwaY8SaK/U9yeezBrU16I10st8nGlroEvVRr1iEWyWHvWI8EvVSv2Iqh54qToZjzcf2ZnlZjyUAe6YCyzoXbwYyY4BCXWty1CXVk1xTG4VfshmEebLcSkqXzqtIeO4V3a2qp0qVKlWqNEiHVj2trXuS8gPi7bpt8jl9by6Tql3R+2iKylyNY5M0H1Ux0SObuWxIBFqWBjw8Zz3Frb0Ach7f28Pj558EEMKQA8BHswCetosOvgsSyIS42Y0xsCDPbUOJkYz1AoyyL0bLIb0NBMV2rEpyHt5nqiqYqI7IQpB7qNhN7NnutdliLOe1PJ0fc1nepBwikBoI5PUfijL1Ab+nM8+8gLc/DpFfd/eCD8SN17+LGxRK9syz4V2YLz6F4/a/AgAsLv5yKP8XQRX1a//z38d//svfAAAc+1//DwDAP/y9f4btpx4DAPz0Ung/Xz59Are7oHq6cf02AOA6qR5Pnn4MHYU+53y6060t2DZIFM6Fc/ZWuM92HdpZUHcxh9w5pc5RfFr0pWHVTG5Ym6pX+tyyfllHR1r1VFS1ZBKnFtmjUQk/U5wzMYWAUjVoeD+L+yV+QquWIsU19wK/Fv4szlzhuGP02L6JcV8u8UtlleVUUj0Vr0WdFh11WHybFMp7Jcf8WfSrq+axlSpVqlTpXsgM6dLOPHe8dzFPq6DNK3OP51BOlOaxDonps7ztVWBQzoRYq+6h7c8aE3EC8rhubIxK27EqXNKaGowYSCXMAdZgbAmIJp35qc2T+NIrrwEAnnsh6L3/8u0Asn5w+SLG5Eg3GXF3WnEma1namIwxHlFeBAJDWxMDXZH1JQyid3DXZVKDd1gQ9sHSApvmOufRUp4NYnThFg4k0Mi1rvXwVK+A39S277yKF6WUs5kxQPidvnsRShKHu8idSmRgAn6bxoojF3PaE8Jzfvc//ft440pwrru5QQ9wcgufXgvc/eNf+TYA4MazL+BSF7znx9vhvRzcDBLI5vxjfH6TcKWPg2R404yxeSK80/Gnof7XXjyLW/MgHV569x0AwNW33wv9ef8NTPYDOD45GRIknTlzFgcHARdp6JnOnzgDAFjMDnCTcmDskzf4HC08vRCe/w1iYDNP79NyAi0TpeES8Jp+Jamjm45KXJTQbV5Ozy3m/Psc7lCOmbAe+KSPiXOdW16H4DKJv2laV+LxmZh9Z+D3krb0+azjBVnAxPhNBWlbmHvNbovRR7+JdQDuZLyNljjzH1xeqVKSuFxx/dLXVtG1N24VC973oID3i+Rp4rcQzUgkvIa6riaXTAmZmFHMNSnCCMBjThnReBOZzQ9w604AQc+Rl+/nus8BAG5c+wQzUlExGLu1vYnNDUpsNCF1TePQduQ3MSa7+yaoPEzTYEYWVvv7YWNZtAtw/HIvC0vMM80qKvH/cE6FDedhKS3aSLK1yTmAPgbaOCXbm/IN1R71Nv0ovWIahrxqG1oQrTXoaDMajTkESih7eqvFE6eDZ/b02ZCQ6Nap0/jSNIz9q7/+qwCAf3fxA3xMm8COC+offyZsHN2tDfxgTBvhq88CANo5sPBBbbQ9CdkLj71wAaNjAaje+Du/BAB47A4lUPrLt3DnjbdCfVeCJ/flDy5hdpuebz9sBnf2Qp3z/WsYjcK98xPh3Vq/CbdDebw7zrQ3gqPfo4Y/Sc1sxbFfTnehdircsiwb23Kb/3SO6YVLtCW6IfGJgtxnslU1BOf0/XPcH5fWn6qSYn97Wm+lUtLPQCdjOckFoMLyIx8X9czlIitplR+FJGJLIldkm4JPbqZLWj93uD4to6p6qlSpUqVKgzQoUZTVUofcNks1rLPLrQTNWRxXiFUGeAWBgjnicM55RJ8KqYu4WnhYSkDEnFJrPDrevYmRn/sWn94KKogrxFk+Q+aVd176Mq5/EtQNkxHFCpoAY+JmzThwkydOTbFxjKQAMsPkOEJ20qCj/M53SHK5du0abt0KKo47twlQ7TYACsXN0Qw5Jo/zKt+1ss3PcC9Yq5LrsB9DwY5bDPCc8uTWoJiA6Zn60Q3PF1a/dK0XdYsj/djZC0
FiOHlujMX1AAqffvmlcO2rX8Nbf/0mAOCp4+TvsNHgThckiZvTMC5708CpYzJCeyyosjAO57YWc5y7HvwhbvzwTwEAl26fw7UrQeW0/VxQX51/5vMAgDPHTsD9Ughbvr0VVE+LkcGNvfCO9glo37kR5sbsnYvYvR36016/AQAYze5gy4T5cbATyh20BxjRs0uMoLaNI9T7YNb7Bo0SDcupOV3v3DK/iXKSHROTihXY+zykkEmw3vitSi/ZCAVq7clzdwMqGZmSHrhWFQ+t50GeqayAmNJ2WY7reEv2Dryu36QX9I0raMiPQve3l1RJif8mG1O6kNSfGrL0aZVarEoUlSpVqlRpkB4qRnEvsonsnQUnNeYvnNPmdXS0RkwVoxdzv+IYTToCWi2hvAt0mJPJ5CdXPgYAnDoduN9Xnn0VX3yJE+QErvDd997C/sEtaivUNZ/N4S2ZWBKgOp0GjtdOHOwknDtxOkRGPXkW+PhKuPfqFeJgdw12SX/u0gCjAV9oUo7HGPRBucZAgpOayOXxWEWTOqrLKu5Rg45ZtfKrGJNGvRjhfBxG1ibnnn42YAkbx0/gOoHBb/+LEBX222cfw2vPBSnuxjvvAgAeOzjAz37yQwDAhddeBACcfCZIIDvjDdyaUJIikijG1y9h54+Dl/bOX/wFAOCj/c/hzPHgrX3j94KU8dG1/w8AMMccbpOkPopY6x4/j+3Hgont1hPBTNeS1/6zzz0n6VpPngjH/bfexFu//89DfZYc9SzgF2RiK0PAP1wcNzXK9y7XD9Nh4xKV+laMPaR9BumH+oRLHelf8+mPspd3bGSYW2bMRM3cBKTLG43zufd8HmperxYplo1p1rWyNKfsgDWa1esjhgZX31clikqVKlWqdA/0yFo9rYxNktmLLU2YzhICH7t4sxerRJJAELlvCXHhPFzLeRFIh+477FKoCE6TevuTcHzl5Vfx8kuvAoBYLp09eQG3d64DAK7eCPrvznyKZhosmlpK37mYkemstWhInb6xGV7R5tZxTKdkRbUZ9OO3bxpcej/ce5tiTnnLSXMiNyRcnymZ9JnIucTMO3FcxIGP6lBSRMoMLeGkjJFzMalSrMSSFGFtPL+5HSyVniCM4Jo3ONgIXPrJW+F57/zev8ZLf+NvAADmhqL77nyKx3zABL77f/1DAMDjTwaJ4thTX0G3EcZyl3CMvb/4HuxewIz+wf/yP4VrWxNsHA8WUG/QBDl4M/Tx1NTi8uX3AQDuk/A+R7d2cfMnFwEAH9OcsJyzwgITMoF+lhIpfe4rX8YBjf3odDCj3RxZ3P7oQwBAQwkGDDnxeaskZImADCw16VFkMqktPdf/xrTZcqmuonNYr/yQ3OOLbfZMW5Xe3eSYV4Jz8KV4Mko46Ek20a+0j0HoKvk78PqePkQhF7XhVM/JMGmr/46GHO5KZfqpWUvSZfzm8j6WyCSqhjKt2CiW35x2ns/1r8Waklexsv7SfE3qdS4pZ2DEw1nM7QzEvl+Ar2APmrbRUIweG8FeyX/sgDGh2KM2nJs2I2xskbkj+2eQx67b38PuzQBcjidBBfXUhSdx4UzI0Hb2ZDhn7B5MExapGcUbut2FRbC1M/jmgJ6TjvYA2xth0dkhtdT4zAkc3wyL6hu0WF2/FlRh3lqZmJKoDV30Sqexcp0TQFJAU9F6+GgFQP4OaNXHLmZ8kAQq8XuWXUp9INSO9TDkWdr5MG5jCzQIoPTpE2EjPHUyLNhmbHBiK9x7gwwFps3jmH3yEwDAJ1eDh/btW7dx7aPgDzG+Hd4BaeYwv3oLl8mnwp8IY3buzGl887/4HQDAb/69/zL0Z9xgQf08fe7xUO/1cN++b7B1LWwQu1vhHcy9gd0Pqqz5h6Htxa1QfnHzGtqrob/vXaF4UTs/wPlvBA/xbZo8ixvXcI3B7r0wB9hnxxoniZQ6Q8C88WgW6cJINqXQFL+h0rcXNueUfK+O9FvmTV15jffA1f7378SoQqlDddDP3Fzd+sjM5cCuCrcvamK9ovOGAbWxuaRgsmCX1k9XAPnzpUqPgV7cB1e2NQBur/xEbCOL22Al/eVWLfyDmqe4hq5ys6iqp0qVKlWqNEj3RfVUcvhYN5ri4dvw8X8V54hKKWlHg0S5PkpxQxk30TQGk2ngdI9vBWngzMnTOLkZuN0nnjwPAJhuhIbGE2BCuZlPngoAprcWc4pAeuLUFgDAmgPMSZJgoPskgrnkwu5gjsCVzlw4HrROuIJzpyly6R2LZiP06ea5UO+dW+Tl3Rl09Cxy58jK8wn4bSCOipK+VobDiAMfS2SmMQJii1rARC6tZx0LFUI7JvsWSY+9mUPILBo3An5pGLHhrsHuBW7d7Aev6bd+fAsfvhOkuk+vBW58vrDYI6e9AwTp6/T5UNc/+O//W/yTf/EvAQBf+ca3AAC/+Ou/hqeeeprGMnDye+0+dkiN9/VvhzDmN+lZ3uuAK6RidBQheHNrEwsW72dBOpoeUFKj+QzdTlBHLej9nxkBN370IwDAmKSN443F578akil9+JOQ/3txJ0ib44VHQ972m+SM16GLZrTiZeyjdC16xdI3d7jvMFWF5PeuYEMLdZQ9vlPOuax90H+mqiqTlIuSiDFpvcOd7KuXDk/3MraqChEGMmnqLuhuw53nVCWKSpUqVao0SCskipKTxuq9ZWmkybXM1e6SvO9HZfdeRaE0SVlAccmyczt4E7hJzj20sTnBU08EfOHJC8EM8tjmcYn/tEm66t1ZkAau3riEpyjvAWMfzXiCsQ1g7MSE+yZjg7alsBSzwBk3xJHOugOYloDzdkTHKeYzSsXqYxygEbFBj50Kev0PKG7UrGsRQeTQHWsAz3rPhB/TppiI+mHE5EEQqUBpeL26TRg6boz1n23UEfML6hq5QUyUHbB9MnD/jQnP/J0/D3kjThxbwM/CzWcpjhKaMa6SJLG3R2FOnMUu6e6Pk8lqR/3+6Ts/gdsNIPYXP/cKAODaxUv46fcCB98RyPKlb30dB8T9d/vhuE/9v3pwgMk09O3CsYBzXPMtPiUpptsMUt2ExmBj3mCLJMgDijK8885FnKXQI9/+9SBF/Pk/+kf45c9/MQzRxXcBAB9dDKD5eGLkK/WjMY2jhclxNvgoLVJ/GVQvsqRmScrNNSh+234JDpKWK0Wg1ce8G96bqKfPhH7AJI52aX9iL5yGW2RuRmA3n66aTGnZ65eKvwqVrBvXadDh7ghouK7l7yCn+5a4qET3ZYMYat/0bbrpj/QoAe86jKZhlpw4Fhbx84+dxisvBnv9k9thMba+wXREizxvGMeCKso3wA3Kfnb89AUAQDOycKRqadsASC58h5Zs5xfkk9FQXbabwnSb1DdacF0MTugoxPXYODQ2vMKzpOYa0+Iw9048rC0v8iZadDCo3boIbEtWvYLVk2kK707tzKLiEz6Cv/ROPG7FiAA2tkVj39gG+/tBTbO/CMcJjfel/QnsNGwQZjtsuJubmzi7HcYZVy+Hw5UrMLRRniSLolNnQyDAS29dxP7VsLH8yR/8WwDAGxc/ws5+i
A31hS+HII9vvP0zGOISnn0rGAhsPhniS22cPo3nCAgHtbMJwFL2wj1ySDFkgHDru3+FnTFZO3Co93c/wGgjlL99juYCLP7pP/mnAIAxAfMnyON+1BgsaOz3SV3YOcWuqZhdYoBh1fsLjavfmtb5HocXwaGFqJRbu1QmtwYK2qVch8lAN6Lqs7Q5lZ4z2rZwodhWaVNYY1jCM+U70eFpPd+UVf1Yv/7l9Qxfr6qnSpUqVao0SMMShS9cXkMuS0XBvurnqIHtofZLJNE4mTNhVcF4jDNnA2f+0stPAAAef+ocNohjNBQeentrCxujwPGNTQBNTx4L0sN0chwbW8Frd4PiAZ06d0FUMbNZ4IJv3rqK28RBz8mPwlC889ZtwxuOUR44aN/tgAOLbo0YOG8xpn7M55SSdR64Wes7jJnbJA4zWLEy6Exgsm0iAi2JlGgc4eHoD8tqDa99Xwugo0T2pDhWmIiaRKu0DLOFNC7TDYttin11LDhGY+NMeLbjj7+K8XZQ1xxQbPj9nR1sbgapYQtBvTPdtdj/lExgXZDwzp1+AQDw6ZUPcWwa3sfp00HK+KVnXsbLX/4CAODVL4Xov//4f/vf8aVvBJXQtY+DBGI+DmrFzbnH9/9V8Kp+4iuhzPaTz+CFx8K79yRV7lJ8+e3f/VVcJj+b/Q2KyTX/Em698QEA4I9fD+a9n777Ntw8SDbbxzbk+QBgc+GwPQrPN3Yx0m5DJrMHFJa8XSxg6T2zWWX0lVFseCEe0BBF9ZG6q6BqWW2nP9BGXl7Xl81heMXHK82AyFDFPNOpZFPyFNeP5Ndhn5XvRuyqHqQ1qlhxbh1pYNVat47qaR2qEkWlSpUqVRqkFdFjc8Asxi8qYMNiqmegAKzM8QPo75R3m4i8fz0t59VvlyTZCUdOTkQO1zh//hRefDmYS7708rNUx0K4X+5nt2jREU5w8mTgTrcpKc7x42dx+nRw1BpNCMicz3HiVOA25+yY1syxcZwcqfZD/fN5qHPuF/Ab4drWOHDBYz/GbBGkhYYTLnUe3ocKb1+8Qc8Z8I6RadBS9iNK74DOeAGKCfNFZ7w41fmWpYDoPZd4cCNE24wacK+OGc7hGI9oFGZE4L6NJrMnTgZp7cmnz+D02cBN22kwDT3YC5z3+3/8PmbzcO1L3wopTs+fPodTTwXsYPFS8Hp+/6XP4cPLwSFuh3zT3iWgu1l0mO211FYwNjj14ot48bXgRb91PHD8v/gbv4733nobAPCtr34TALBHDnW77QFefClIHtMzFA/qYB9jwj7M7dDW1iZJoN0BHqf5xx/a7nSE9rFgUn2M3t3zE4ef/T69+/d+Fvq7FeaLbTvconSqbRvGcbK9jdEkSLJTAtetbeBpTjqyfdaOjjZzRg0JhijigHgix/wLtu+NB8h3EM/0cYgSOK3L5/hJTKWr8Gr5w/jsGqJ0ISl+9TWtwcic9QbxSkW2kNjosFqQo8BjVzodH1E7pXpzWgFm04QrTAy9KIt5/L0bIh8teS/Je6wCbBtaaae0GJ8/Hz7KV15+Ec89FzaISTOmKjqMCdzcPklWLdbC0iQ9eSyoP06fDOqHU2cex5mzYaMwpCvy1uAqeUzf3gnA60F7C/sHQaVxZ5+yoM2CKso3Dp5DOdBHP5oCZhQWon3KqNa6DrdvBh+MuQn+BSdPkEpix2Gb/D8aAlRnrsWc/Rdo0ZkvAHAGPLH+4g/QRptuSVwUw5KzB6t+7azgYGsp50ayIcsi0e3h2HYYm2eeIxB+OkNngrrlJG2qk71Q/uAYcOVK2ADe/lEI1PfKa1/Bj978DwCAKan4pidO48XHQn3PvfplAMD2yfB+3vzh9/EjWmCOnwsL9dtvvo3HyZKt2Q39+cZLr+Cp7dD+jN7H/u2gzhrbKb71FfKtOEOJjixA+whu3Qwb3K2dYKRw884B9ijM+K2/fiO0M93GfDNY0V14IvRt+/wFbJI1lzNhQxy35JFvgBF5qnPEmf3ZAToKFzIeh3e8vb0t49tREqTZQdhgvPdwnDKRqBk1wlRoNU/8rnOg1me/03M59qypHOqlRGyFoxdAZj7jZibMpyQqU2rt0mTM1iWjO1naFGzhIWTPSTefpU+yltVTYbzUObOqkQdIVfVUqVKlSpUGaVCisCOWKHzkMhP5kEXYFLC+32D1XZEKBTwiDvs0ca6vvBTUTY+fP4uxDSL9iADSyXSK82cDB3rmVFA3LPZ3sDgIHN9ZMoE9eSJwiadOnMKI6vCKKbqzQ/GCOPDfxgig/EOOWPp9ClZ3MN/F/iL8XvjAZTfTFiPyBua0mb4zaClU+alzYc9/7EIA0q8v7mCDANQxHRfw2JuHRvdIB2ZmMbJNKw4X2hghPoOc4OB0qbYpoZaFB+/E18Pa0Pb2lsHzz1Hsq3OBg3bGwTfhWZqGJBWOozUyGG+wFEjmv/s38OyF8P5ukLTmFjfxve8Gv4ifPhkkw2/+5t8GAOze3MXGVngvDT3fYu8OnqR4Ut/5938EAPjBX/wp9ikR0c1PyVOeuPZzpx7Hf/bf/HdhOCg97qkzp7A9Cs9w+kKYMzOaXzfhcI0kijGB8A0MPv4kqKou/uTHYTzu3MK5x0OI8qcp1PzP/uxPAAAHuzM0JA1sklnt5mQMQ6q9Ob3PML5hbKYcSYBSunrvxfT4gMq7DrANq63C/ToqgctiqXGJcHLA3BXpVFF30R89PVDybQIEwi90krj0AAAgAElEQVRh3A2iOa1IFqZsqMqSgckCKKZl1LmBZStvc6Xp6rpgdklNx+eGxLQHTFWiqFSpUqVKgzQoUUymDI55SXwPR05fXTQxlQitxSQjRb7igVDiCUq6yKYBThwPOt/nngtg6NNPB1PYM2fPYmMSON3TJwOH98rLr+L0yWCaeedm4DB379zAiLje4xQSe/sYxXCyLVpyGNuYhmut85hSJNnRmHXyC3Q+6LTnXThiFOrc2ffofKijI+OBrt3FjEwoRxLR1WI6YQewcO/pE6Gd/Rt72KCw2s0klN+aTjHtAsc6mZFi3c7FK7ljEJTGynn0IsrqdJZs9urh4zmxtCUponFg0amxQZq68OQxPPPccWpzh8bgACCzztE49HtMf4+9w+kNMm09Gd7P2HncuhawnRMnglTwya1P0ZEUPGuIcyYA/Tt/8m/gOsKM2r8X6jqzhZbewXf/9A8AAH/4//xjjKiOhp7BEXD8jp2gmwes5Nf+zt8FAJx58RU0ZwPOsaA+3j5w1G+LDXIaPP/1XwD+f/berNmy4zoT+zL3cKZ7b81VQGEsECAIgCQokpKoVrPbku2WpWiFPDx5jHA47Ff/DkfY4Se/2REeo61oyR12W5IlsSmSTYikSAgcQQIkCzUBNd2qO51pD5nphzVknqHOrUKBVHfEXg91bp2zzz57586de631fetbACahxnMMapS7FG1MrlzF9//iSwCAj3/qkwCAm9ffAwDsvXMAy3L1WYINmN6Az32bx91iMqGxnM8JIyn5uAeDIQYsTZ/nTKdta3i+Ls5J+9/0HhYiS4JHrPWSF8HpgLCy2VqIYjmy
AJTP6xNA3CpXO4YYYSm7YZLfDInvq3TYpaUnzXjYNd66eQgw+5eRNVGMYgPe8cvK3nQRRWedddZZZxttY0Qx2uECsNajbZglw2QMx89wh4DVJipY/94v24wFlpqKF0WBU1xUd+YsMU163Ofh7NkzCC1FBue4F8HFiy/g9A5HFEOKKMaHWyjY6ywZN9g7orz2/bsfYGeHqLL5kKKSrOyj5CIsYykC8GGGPBMWlch/sGfXC7DsTdiKex24FobplJ5psq4xKsVhW8IhRgOWuOjXGLDCreeitrzfQ87eiS2YjtlO0LKECKuLJP3sPQlKIWU9AYYjSJ86k1K8KJGHFH8ZIOMPT56kY3vm2fM4eYrG4ZBVW6spsDUifEUovI7P05gGJ0+wR8zjffLcWRyyMuuVm7cAAFunzsJwxLZ/QNv94KfEBmunFTJH+7t3i5Roq16BW/v0+QG46PFUgWLCRYuCJ7Uc1dkx3vzTPwYAXPvxd2n7py7i3Keo+O7ix4lpde5ZotCeGF3Aafb4pyUNzE4Ablyl4715RPPpZFli0Kdr9dX/5y8AAPPrxI4b5j0E0IWZ8XgEC5Se5mzNN+RgMMRJlnGZck+LiufO0dFYo7ReSfNkOBwhsMJuVVFU1bpIgRM6efaIrmTK5HlkzFIjVfOARkFYpM7KZ9Yk762yo9atRCs4x5rjOHa7x7R/JbHcB9jGB8VJBkbr2qGacYOZCYf2U17AKo+WK0YNp6WMsViOmoz1GuKKpYJmj25LF26hYjxWAkuYmjPFtdfPceYM3ZQ5d4yznCLa3R9rh7EnQOH7nTvXYDklM2JJ73x7G3VL4PT+mCqiP9i/yT9pkfMNOHT0O8PMoDdg8cApjeO9e3cwnR8snIqtaZsyDNF6GvuRkbSNRc7AdssNbEKeoTS0ENVyDThVdWp7hIzb5DkRlcs8rKXv9jjlMhx41DWNXVMzvZJH0TUuPiBaGWesaO0YY6JsudBvGSg1bUCvRw+IM09wGuapJ5RqXHH3wP7wrNZ7yINlxik54wN2WPTQGbpWOUqcO8sPmymlcG7uXsO0puv26deo+929Xa7UbiaYejr3a3dIcO/sK5/GLa67+Ef/2X8KAPjRd76NYp/px04uDJ+vKXD6HKWZnrlEPbnPvvginnmF0kVjdqZuM0jtLkzQP0VEiDlrct2fNbhR03HcZdC7Gt/DwX2aP8UhVXJfukDOxr1rP9PuWL43hJhohuU8zm0zR9vGboEAMBrR9W/aFhU/UBru6DevMk2H9vv8gM4spjMar5rHUVZ971tkXHAktTswPgLhyUq+3MDsWHqskGH0e0BEs2WhzpY3T74fvxwX9vj3uu1l7vpksTZLr7rvNYdtERuDpcuX/n4C0C8/bNZqOSVjJtL+QVO7WKHxxsNa8whbqGdYPf74gEqbL21eg7vUU2edddZZZxttY0Rx8SJ5T7NZjfGEwtlZj15z1huajmvM+QnVcmVx8JnyzhJoCevIbo9v68By9iSC16e3eD5b2wOcPksh+pCLvmC4Erido1+QlzqriHJ5cAgELlw6xUVRZWYxnlLa4N4BeaSOaap52UPFnvCsYr2e7YGCg5MJeWz39/ZQ10f8+xQ1WPZ4vW9iBXegVIFv+xqR9Ti9AlugL+B4nzyuMcgjHfW8tmJtGeiuMIeTFqvsoA1djbqh36/5RyuVYTLqyLA6OtAmhXk2Xr+ouyNfpvPNC2C0QxHF088QeeDs+Yvah1y0jXJrURhOrTjy6CcDGvfedg7T6/HuafudUxcwPEnX6v6MIrO92R5MoDG9+u63AQANNxEK1QS2pO/eukEKrR/73N/HzXevAAA++/v/AABw5sIzuH8tRocARXMAOWo7W6zjxQWZ8/19fOMv/xIAUA7os2cvUerpyedzTJjYMOXmQ6effRaOU2zbHNHu378PXGUF3DffpHNhz7/sGTQcNTQcsfRChqJkim8jcyag4JDMJ94pABhrtTBP6LSt82gnLKfOxJRer1TPXdKWDffu9t4lKUkyazIsSAirLUYSmzMsYWU78pEXve7o8aYrSpL7VIr+pt+Kh7q+gdIa8PhB52Dih/GzlOmx/Bki5K67Xz3Y4BE5uxKBhBDf8uknSEYq/Z30OB82W7N54LqIorPOOuuss422MaI4z5o046OJeisDVq3M+dUak2jMsHfTenhuriNP+nXtDD5SCzbBKaIXkueiuEnvDbdKMJ6Hlgvc6ob7R/QH2GL9pSyjiCH4HpqaFUunDOBnJaYMpIoHZjkH7X3ANje1EepiXVdw3F5T9HVGo75GMnXDrxpRRMqqUgZ9DnB0Uc2YNpqXmFUczU0ZsGYguBzkyNmDrrkZk4FHm4leBx13z+cYNPSdecVRIm9iHLToDdJgyIY4zNpx06zmQjnCKsqAJ5+meXThIhUnVg1wcJ881c99miQxqukM0yOK4uo50V5tSfhPfgLIWItpOmY5Epvj0vPUgOitdwkT2Nrewc42nfP+7cu0D55/fV8o5jC5Tdd7iBxv/4CkNT7/uxRRnH3iSdzN6Dcyjhoaxn1sCJge0Xcv/4R+8/TFJzFmzOH963T8b5uvAABe+XtfxEu//usAgM/+9r8FALhz4gR2OZo6wwDz/YMadosIE/ml5wEA996mpk399ghDlhQZMtW2ntWouKmSBnWNh2hpiafduqhRJpiRyKnYLFMZGtEEa8Zj8Kmj5OK+HuNyWWPROJb80MjCJHpLD/ZcTXJfRq99Nb8f0/pphPBgf3mx4/Kj4Zyb+kAYm76/dGzroo51QMaCBMkynvpgkN+kwPw6QtByl9t0E78aJcUdbwbPj9OM2vigOH2aOOtlmavEddWTm4hTOsYAoJSBiJHVcw9emxRM9jAfbeOipT67Sx/ybyeaSUzf2N4e6CImPavlIXjixBCxL82cj3sMU9CNWnFKZDw5Uq56b0QLmAC2WVlga1s46zRoB0dHmPON7Vhfp+xlaFlW3OYMOpZ0rNNqiporvzM+tlH/BOAoZZGz9Hg1bTHnB8WM9YWkEVBZ5HFxyCWFElDJYsLV3WVpMRjQdjPu1lfyg6N2QGBpdZeG+YLxWr2zY38Z3krmwminj+deoMr3nVNMjpi3uM39ot/6HkltFzZDzmMuDZ1a1j06cW6ExtM4zzgHNjMDDE4/R5+f/RgA6medZSyiaHkh5QMqUaKZ0/E2nGLzVY0bN4gB1XAa5qkXnsf3RSTPi2gevWYA7t5l0JlrFO7fvYmdHarYHzU8tjz53/nmV7F7n9JcW9t03Du/8jlsnyBG3XssY373gwOc3aJaHn+JBA5v3/gB/c7kEFNOb5qS6zOGWxhmNFFrZkLVTZ08rCXdyuCzSdQT2KFxzqtjleVxGZD05ng85fOkfYxGQ1h+cApLynmvKVUk9RYri7DSI+JvJhSnNQtiSB4km1LL8Z34m+sW6MUFkuqrFveVvrdplVq3hq1jd21a69LtN+0v3nJGU7sri30IcTsRfky2Sf9avS4LP/rA4wW61FNnnXXWWWfH2MaIYotTKMSJJ09gysCo4zRJCB6eQ9ymYRqfd6o
b5F18Ui2LOf6is1HkvdCzsOTmQ6dObisQLzUII9bEGY22ERi4rFnLB+0MhSPvynFax+c58m2ucGb9oJI1eihyoTMVCqN3Tv9uOXzPcos+h/emEeCfvZseAJYeb9o4zhlHEv0BV+UOB/j5faJ67t0nD30YiEIZbIaslGOj38ltT8fccw/noqm0WjfnyKbkKKYsWriWq8bZ0wwWqgkkLEkYs9IC1fDUystt7OxQ6umJJwnMPjzYx2c/T7UHJ7fos7Zqcecmefcf3LrJ25GXf+36fYxO0W+++CrJjE9nwOUrVI/Q26LI92jSwgdOtzmaiwVf/9YUaNmbrjjaOZgeadvawz16fe7SJRQ8vs0ReevSAKp1baRVSjrKG4zv7fGYcgQi0aUF9q5TCuzNr1Dl9ctNjfASnftRTeP91Meegv/pe3Rse3TOw22KvsLkfbAjj5ZbrE4OG2R9ujcl6p/P55jNJZVJc6zHtTt5USrNOee8ynQ6hvOLLQOsNXqfSMag5XMaH021WVKPq8LbttV5HauIgWUwW/utp9DrUrOs1Bb6Rq8DY1eigdTTXpcSWrVVoD2C6maD/7wJBN8YHaw9BrP270hCSCKPh+jjHZJroPe5NhJLt3t06yKKzjrrrLPONtrGiGKHFVHzIk8wCc5ZC/AaPJpWqnyZyukDJuy1eS+U2Tw2f1+TH1y2D1O1uLwfg9iAJWeU7vTp0xiNOPIJ4iERkNj6IUoGv2v2eIoiR5PTsWQMgg8GA/SYrhn5akwtrZuYN2Zq5mxWa17XS6RiYstKNycPcFJTVFC191G1RK0VTKNpAM/Ne8BEgXNPnUfL203nBPyWGVWD22GBnHWgsoKO1RaNRlE1e3l57iAp6tFQKJSMp7RzOKZ11hwJeXj4pdyv90F7U6hfxy1i+/2zOH3qKR4HGpe8CLj4BBeiTWg8itLjM08TrfTjrxIt+/CAqK7vXz3CN96knP3u7tsAgPPPvYRrnMcfMmng87/+D3DnCgHK14srAICDO1R5vT+7i7qg69Fy0eEHR3e1d8M7PyRw+qVf+Th6p2jee442pMmSMbm2PnA8P3MY5Py39C1pmbgwPzpCNWWKNP+Ou/k+vvNnXwUAPPUf/Rf0vSefRvss4RbYIm99u6CIojJ95LnMHaZHtwaTGe23YiLEcDiKUW3SiAgA5nWlCO2Qo6XBaAuOsSApoGuD1wii5DkzYEyybRul4jZTUawNyPi+0ry78UrYEDMSift2hTYagkemFdTJvFI9pyVX2kSMTCizIaTf3Zz/T49Vfyv5XvrO8vur35X3Vn8jrPyxusu0VM4sfLyIeRiY1ZatC7Tb1X3oe6sVkY+MqQDHPSg4pM/zPHbB4geEhK3OBwUHq4rea+sAx7IQ+rpGaOsXbcZYndwVVz1XVY2XLnyM3+Mwnx8UuQFKFtATWY/haKgMEH0YYAznFm+yUEnYl6FlUHMm9KGQq4THgFkkxnrU3JxGbmhJ81TzA9QtAeciQe58Hz1pUjOgB8B0MobnBXzAQHSv4AV6e4j+iIUKORXishY1L2IQsNfWKnAo3x2wpHfTJrUxnF5s0eqNIVWtwYaExCIrKTeFOnURzz9PVcw33ifZi9HQwVo5d645mVfIc2HP0TkdjPnBWR/heRYRvHN0g8YPfVy9Rg+KX/uNL9LxjqeApzThF75IKao+p2iO6l1MuZp+zo2q3r38Y2wxm+rt79O+vvj7/ya2nqAHVfXeFQBAwYus85TSA2LmxBEaKwNCr3KjO4+M013Xfk4pqDIvsPtdqpV46jO/CQDYeeFV3OZK9XO/+joA4Pp3vwUA6LmRPngsz01XeuStODuxSloujCzeQyZVtI3H/gERNw5Y9nw4GiijSWorRDIFAJpGhCJ5bvR6KJguOGNiRtu26giOeK4VRaGOkqSlZH5neakkB6mzsQYwImmekFDEhE0oFkJYs1oGrCZHjH6+6nM+KBGzni10vK17UhyfXz+W3JN0DdSMHb+zTH5Kfyok3zUxn5bsd/ULx7HGutRTZ5111llnG21jRDEYEMhrkqe60CWlsUkIQFsv9nz2LkQRQY422tYrCGpXUlAfwjY1T1F6YK5A+3xGx/bd776NT7xCEcXFC0Tb7DNIN9rJwGrMKIdMIww1mpZojA3XElR19KpFL2fImksm5JhNJ/ybdIxboxMoGCSUpjI+NJhpt0t6rwC3wUSJwBLQLXupNpQoDHltJ5hzf7g/UY9vyDLnwz5XOo8y5HwOYL6+h4ENFDVYzqNldoCCwf3A9FzOYKDMW1iu9TDSZSlx6CTFYbKgAKAwZoWPPxwGjIb05uufegUAcHD/JgYc5fSY5lnmE8xm5O0GQx7r6Sdom5NntuF++jM6lRP03mR6FZbB3Z//mKqwn3/6Ffzf3/syAOAru/+cxorn8Pbps3ji03Tdz16ka7U1zHH9GtdbyJz0Fueeex4AsP/Gd/hEuaFSMJppbDVwStr9aO9xGkdrLXJ22/bep5TY8GMv4wRHD7tX3gUAPD3ehz3JVPRXX6Xvfpr7dX9tjFM8P454HoZeQE9SX1zoYrNIP28ZaJ9yu9QsL5SYIvTXpm1Rs+hhjy94r4xaUsu6bE3TaKTS5znmvUfFqgUT/q1er4fBkMF8bYwkUXqB8dGY90fzyRiDoL14aSTT5INQfKOlx5XUTSk4vQpwLzvuC2D5ptzQY9hHUgqQZNNiGopMWcN+mUjA9NilCMTAJgQBeUlSUMesxV1E0VlnnXXW2UbbGFEUOctUewNXMv7QZwregNUo6xbzEXkHVS2V2QFNTU+rOdM8q3kVFUY/FEFr0ZabrxPIJe9JNXP8W566tz7YxTe/QZ7i3/sNkoW+eI48oFBbzI9Y92jOEVFo0Ah2xy0vTVYg53yt4AaWQerZdI6K8YqyR+O3vbOFEye4jSqDj/N5o1XdZcGU1pwiBWfmCBy9TCcEqDZtwPAkAZyFIc2p8cEUriFvcGtrkQqLvkNbcMjCEUMbFDrQHHGWl8i4WjfL6BysEU8wW6/wK2CYXFATohaPlQiHzu3sEzUuX6F8+ydepGKyc2cuYo9VXbdG5EmffvIJTGryurOSvNP7Y6LJvv3zt1GVlGM3rIhrmwDDzZfCPkUgr33hRXz6BQLE37j1TRq/Of3O0bVDXLnyDgDg5X9I1dLZmZMIrL01vsbU3MvXcOFpaqP6E218w8A/TIyKBZYA4IzQrekeEUKEQYBlL7k6oGjp6O4efud3fh8A8C+vE713dv8OwBLheyN67f3jPwAATO6Osf9TwnZyxttaf6S5/mAkmp8lleT8mcijzyaa6+9xNHDi5AnMuKju4EAKTwtsb9OclX0JBmEzq38L7pdlmTZHajjE8t5jzqGyeLADBtCBTIFtmS8m1T/lc6HxFtBrEXgNwSbZBLvwGe33IXikeJDHLySAx48GPmoJ8WXF2TgEBlF9V9ZCswiEy18PqAh/mOiniyg666yzzjrbaMdEFOQteN+iLcgrKArOLXM+s9+vMRyK+ihjFE1AzSyjySHnS7MGjYsZs1+s8RPW5JozFz2qgAw//A
HJRpw/TedXNFzcVJfI+7ydtMPsZzAsJSHU4CwfCgEKzZx7F/CptS5DyQVJ26zNs7OzgyHnbXfvkWfZtFX6rKfjYAyil51AYOrslD9zPoNx9LlvaF9HBw22RtS34MxZem/OEUvW83DMLBJ9ex8MfGC9HiPSJlH/R5pOhCBFfm0ybrFdpnhcIotifCTMStFgr896RucNBlvMYjoiCi+GBZ688ByN0RZFR9PZHo6mJHdx/4C8+/fv/RwAMK5qOMZUAlOwx/tzDEEU22zO/UWqAfZvElOq4Lx3yzIt/bClnn9b0xgNnEfJdND5IW13++YtXLhAchotXwORoymCl+CMmuWA8+miE8LDmOVcSOm8NpsS5tTPfv4uPvfb/zYAoPoh6Uzdv38X0+efp11wbwg8eYmO+3d+D8OzNI8Ovv5/AQB2WgOTLdNMAzK+HnKMFdOubWYh1ZESiWRlroWbmgnwDodHHOlxZFEwfuFcpLYK5bJ1jUZYmWqqZSQPghh5zGua89k4U5q6SbTRpFGVZTfZmoCQ4g8LrwbLmm70+nCRxC/LHsZL/1AlABveDHJ9fEoNjq8PKgx8mOPY+KCQ1EiW5frQkB7NNaeghq7RDmmOczSubsHrFSqWgj46nOrE0YYiyWmLhHEMQ9c05EjDypUQ0ySpJwnBXNTryfQOh+NOPrMxvddMaJGdFhn8Pktbb9E+RicyOE7JOGkBFzJYTkNV0u2NAdgi28bpU8SJP3WSXtuqwRF3MzscU3plXh/oAyXrCWDNXfNCBpPRgrHDnczs2GE0pPRVweB0lgGntil1MxDwnQUDYQFv6FykSt4FwPHN6BgI9q5C8PSwqxoCS+cNaQtN2zFq7vwmMuo+swgi+OiTgFQWIF4A5Lgunn+W9LUAFJw2Oji8iwvneZHnNFN1cBfVnMZIFvIe12IMsgDwQnT7Nn32zMVXcGZED5u3v08PlP/5f/0neIdpqKKRlTOwDO81JbTNlNnq5j1dQCv+zXs3r+PS56gRkeOq+95EOgsmeko8/XITm2PJDeiY3GFJhpHOne+f/d1b+B//h/8eAHDUpzF6vpngLF+Plp9K97aZ2vzq6yj3aFz2vv3/AQDqdld7PReSS2wNHDe0spx+FA6KbRpYTh44Ps/9/T1YJ42O+DW38CwWOeYuefJZv9dDJlrzkmWEUfHKlsc5y2IVcZktSpsb45EVIu3PD7rcwMoypIJhrY7pMkgdgoeBOD5SuxPJFFCHJqZDZbt1xQ1RZiqsUko3WUgObp2lZJsHrsObF+iNlNUlsHrhexar55oC3MsPBnP8w6JLPXXWWWeddbbRNkYUlsPEwhbw7Nn2A3mHrmWZVTg4JxEFg22NQ8uS1Y7bM06OKtQVeUZSsaxAs7HJE9Loe2E57DTLf6d/rFGnNV57N0t84lqH2ZTeu/E+Hc/Hn30eALB7b6o01pILiIIrtKpam/dkBiWnF+R4xZvLbQvD2knzioDoo/0jGE71CLg6bw4x4c8te+vDgtvNDlrVZe+1UhmbSUdMBU1PnT4BaHqJQUc+Lm8CVNWX2562IdJtWxclzhspRJPKcJZdr9tKPcaoGm8SRFxAtCjjLpFFaGlqbfefRs4uphRfWuvw/gekUTUa0nzybY1zZynKMPfpuKWSe2fo0HIapeUiyXNnn0DPUHrkuUuUpvng+geqhCpRrlA6QzCanBBa6OHuvcTbpc8Ob9/C1uBz9F0utAwM+gZrY9MmGQGPqDigDcPlwzgnhU6ewwCsPDw4TbLrN9/5MT77OhErwHperVTQNzV2d2meuhOUZqxmd9DjQkWRCrc10LDisLd8DQSIBsC1eqjZc2zqGgVHbCrFX+QqqZ5BJjvZbF6rZHtPNMSKDJXnVCdHi5mxaOaiAsxtdzmaKspCG2ZVHJ274NH4RTDWmEipVdqytjq1WpAXXCRTaIi3ICkr3vTS/5NivMU6NAGKjynC020fnEpft991e9lka8Hnh+hDngY76Skv02MX97vxULqIorPOOuuss822MaLIxTsNHs4JsE1gYr9P3hxMUCVNkXnw3sC34oKyl/NkpsV39+4SuCUtGEMImgtVitpCDvDDUc1SvRd5elpr1bv7+Xvk1V68SJ7sJ155FgW3qTR9iigmbYOGMQ3R5s8zIAj1tOD9i2yBm+PgkDyqA27bibavdOEZe32mtJjVBO42jmiS84JxjiLAgimFPYrc5ofAmZIKBA2HNjbro245l5wLjTHpQeAFmyDvrW5bVFwI2YqGlJui5j4bDefJpUFNWnSlmj6wRFFE0pjeIubAWdNo/4D2efdujTOnt/gz2n5n+zQyI2PJAH2oUbOnDUdYjOXWqNPxLqZjkXngvHfb4JlnubHWIX2v3yvhWiEcMHUy6d8p+ejDA/LQj/b2tM2pFA/evXkTpaXf2GJK8/yutKwNUV4iUSuNztiSB2tNMoXjXPeME/gxAeg3v/JXuDqi69x77Vfo2ByN2d5ug61PfgIAMARtf/int+Ga23xITG0G0DiRqxH6qpybjeMNmR9G5T9s4n5GyQf28vnwB8OBKg7XlZBXDPKeePyCm2UAF2yKuohEbT4AeY/vJY4ycmtJGwXQa+dcq3NFIglZHwwSFeoEB4g6cmkR2dL1SKn0K2D5Ygyw+vcmn3rd+vTREXYeVql2/WdxDJbXwkcpCtz4oNBewT7TC2F5kSqZBQXjdXEQmWXXAr7h0JUXocz04LzRzwHg6IgXqNrFMEh1Xh4/2FkXngUfiAUCYMYaOt/5AQnCZaMCF5+kWoY531CnTm9rGqpgJo/JW1QtS0tLz2LWxun3CtQs4mY8fw9n0VR0Y0hKpuh5FBnfeJ459nNi/dimRckVy4M+PRzOX3wa29yvOefPxtMWgdNWvVLa0skDOj64BWismxoN/y0pJa9yfog1Claqti0yThvJIhuMWQQAdZwFzKQPq5oW+Zu372I45KpxLnu3ZqREiSkTCibjGp4fVIfjis+PgeYGeHN00UgAACAASURBVP8Wifv1WSzv7u5NfO4zXwAAPPs8aTN99ctfRl4It15uCqmp8Vop/MIzBIL/+M4eGmED8bzev3eAjEH60QmaC2PwdfGtnvNCd7UH8NLTildhSbkAFcEL+zSHpvf28J1/+n8CAMpvUY3Phd/+DwAA51/9LMZcP5NNWZ/r9N8gfEDpwcD6YDbEhVOFOPncXcgRMlFFoN/OYOEgLQO4PmLuULLYpXS9k8egMUabNUntRDWvMZ2RozKASKtnsQ6HwWyrDqFBKw+AOHixK2MpTChHLCtAX0U3ytq0qprPJc90rRKlguC86rBJzjZqSLmVxkVyLLTXdQyq5feOc16XK8pX7WEX6nXifWu7460T+4tl2CsPiEdhXXWpp84666yzzjbaxogiyqEYIEhFJTfBYe59MAE9lm92TPNs+kDTZxB2xJ6Um+HkCXoqT86d5n3RZ0eHY9R1pLrRHx7rw7dlfvCDzZjV56C1RlNPtqBjvHOPUgtvfPO7+MKvU+hfjihi2gotehyBDEZCO/SYsHc3m9J3y4xrLXoWFiLhLZRVI
X5eZIsavAvkLVfiLT72Ax783DBK2kT5cvT7opc5BUi7GATkP8+w29Uq//eMf4dI/IhD75pQW3uaJFzB77mUAwPw+PTB6nhbexmaw3GWudJJCqVYYcx/G1k3tZeag/L9pauzvEzNrWNNxnDx9Cv0RLZwNPxCDsZjN6W9wI6f+aAcAkPcaXZjrahJ/RqTppQrbpZIDy6lum8xLdiQyA62cloprA5w4QdLqAqpPplyrYjJNwfWYyVXXDZzIl3hhkwY47iOvPcdZ7mQ0GsExW0xYYL51OocX1rbHsC711FlnnXXW2UbbGFGkoGVs8KGfrmy/zuH+18mMMbEaOHkArwQN6cNZvWX2KoJdSHcA5I2r56dywnF8N7WETWs4fhGNcRb3uRQ9+EgblajAG6uic/otk3i4WIwKQjAxrcPbkze2BJyb6L0t686srYuhDxbeoqDkAYB55nW/MTVoFqmvdNa6w0zrP9jTNLFCPc6FJCwRgoDWiKST6KOJJORVReeE8hvV4pWH7/R8gTxeQADA5b/9Dl781V8DADy3/QwA4OhUjuyZFwEA7WVqq2r2KQXlbKPnKYoN3hp8NNknieCSt5bnB7+dGaMCgOMjERZsMGAxwLxHnvloaxv5RCqiBVimCKvX62Frm6KNI8/V29ZFIUL+MWq6JWOYUmDpYM1S6JvnGQLXRbRtPJmTpymimE4okpgx/dUHr0B+KfTbAFQz+lxTfXkWNadE/DDn8837OJjTfkV63MMhXyKcHLd2dPTYzjrrrLPOHsuOwSiit7f89EztF94C8u/AwgOc2OPMG59EIwlKHh7sNUXn9Bc/jg9V3Jgca1jCHOA9ogozR1HGwJgEa0Ck1RJNb9G7IcB46biARbwivlC+dW2kINFO6u0tjqkcayqZbpCe32LUgHipkohCIoX4Xe+TcVGgcxG/MAvHGrf/KK6y0QiWox9PgDMQ89gtT8QsM7ANgaw5+4YHV67gvb8lGfLnfus5AMAHoz4m554CAMye+wQAYD67DwAo69vIWQdKrkVtChRyvR/jpMKapWW5QFYj9vQ9OY5qhoqxhuEWAdzUEIm89K1t8ugt00ins7G2/ZVmPyF4pdNCNe5SfGqJuh5iIyDJthRFoYq2qR0eEDVZCvSkEJDA8EWyiAlAIZ/rfPVxjjFo33D7WuMLxTS89qQOirM8KMB+VOsiis4666yzzjbaxogibayznEdfr/KaeAFm+bPElM0SP1t+8qVfW6fntLLLNVTE4wtJ1vzYWgrG6v/DkrZ82iRoWVHWGKjsQCz3hxbBPKo8x0Lqe/m7SQp9RWYkUQFe3mf6t77jQ2wzKv1jkPSvkO29iQ2OnESh/IXMqisv2MfCtbVx7kTWaoxUls9lLVwh+zdA9NBkH4INmdgeVbpPJscUEqVk8dJ1+2TexcZJooAco5YVgkyKz2hq28CvwSseRoE2ysXEiMxqBGW1EY16uklAK+Ng+PrkvsbV730fAPDiF34L+P/Ze7NgS47zPPDLrHOX3negsS8ECIAEQBIgtXC3JVKkJC7WOCZsyRPSvNgPEyHJb/M4enGEFZqZsGcc4dGELU94keygHbZMjUe2ZFGmRIrEQoAAARBbN4BGo4He0Pu995zKnIfMf8nl1Dm37+1GN5Xfw61zq7KyMrOyMv/9B3D7lj24uC+Yyr55292hrqOvAgCWjp/CUuQoVihRmVnmuU5zuZYgR9qvx0HKpY6VUSeq0oXqfgZ/PuLYVB2x/IVzwUprdXWMLVsDJ7H/xhCqZCmmXL10secEShLPTvQQ2pIrTy5G4TeClCD+ZJ3GhK/z+7EWFy8G50Ub619QaVUpXtSEw4sAW6JFE5naSqgQ0X2srJBCqufI1BznSpmO83uZIUmYdX2GMlu/zPRHUnERBO96QSkOmlv8k22O4AVEGaJVFNe+8kyjNhlqw6ASKl/Q19PuAfj8JTtfxEc33vOiCo7tZYQ1pvnrpA5OUiZafokhFZ9orRAX5abglPkqQZdX57gv5aZa22x8tspLZmgZB6//41cVr2mj//S25KeW3Jq8/Drh4Tl7G8H4IH7Kn0/lOV4cr4sG77z6MgDg/KGwYdx+z0dxelsoeO6OO8O14w8AAOw7r2BEoo0tJBpZAkAB9zbPp8L7mhGxdEokn3JulBGm/doazo1Px3PRj2J7MI/t+x6LMc903C/gncRz8pYU0qU3s05Y1o149vI1MoudTKQcNZPjXNFmPRpxb4g4cr3DmpP4eEBMy2Bpcw7lx7H+5aUFFlURsdqPneTnrozf5aCJnhoaGhoaBjHMUTDFqHb4Aa4hp7GvdTCLXjNLrYjW9BiUVE0pOhCWWnEIvdxWkVToqtL69LmyWGK2t15M40YMTMKahzYKJc+3hbCtoRiLKGMZ5ziqsDxQt14o/3xmpV3KxBMJhanEV/E6E9c8qC6VBVItfCt52XppGz9CuEEuzqHbpRynAqZrTtrpUb7H9ULf6rJ3YD04ZleejCmEkk/nqbUGuBREIq899hcAgNvuvBs3bg+ip5PjIK7xu/YCAMZmEZ4CYlEdkwmsmcLGJBheGWriqGLe67S4mRMjAO4s3bZgJInRhTPByfDShcBhLC4uccIgSv3aAzCg0OPRG9uPmfP1uSjMqhcfYa3EPKPEblohTiNFXuTO9eytTROxn4zFuxsiUiWOrQfN71DpeLwGjrYVTXP7IJxM2us2MvHQOIqGhoaGhhmY2+GuTMZTQqiz64SnqMq2y35W+y7aqvCvz8pCUbpGEUZUhRVlGMc2Yu1j+eyUU5muoyhUJzOQKBWzZxljWEUhBKlwGaJnMPxg4picGh7D52L5kVFROUVvwEmPWL9guYSEJFBtzFgwbXZL0TOdpq4zHUUHkUeTjLhPnq8UlwjyYUmVC67XK0W/bj+gHdKIK5E61gsdUo1+swGvE4WvcBLUboijp5K1k97iyLMhV8WZj7+O3beFEB57TgdHu3fPBvPYS94Cdmt8VlC8LmJFxoNbmczKKcccIrmgcjLfaU6Iw6/WdVF5iudE4937CUcEpgnYT6Iyvr+I8VrQRywtUuykbRjFuE6rPaVdFbcA+g7YrLazklciljfGsHnsKJrdTiYTfkmcnjf2JcQOS79ba02aAI5HLeVsaGqurq1iQWLkhDqMVcY2qUTgcjFXrCdALVQVxSudk33iOtkoMvZsGoYshUT/K4uVz/wBgm0+v8VwyauXyVYc+ap8dURPQ5BvUYmIWOQkco8kHhKU+MWL5ZStWPuY6s6WV8Z/kk0sl9mFD7v0fQCA3hjeiMR6zoofReWjhLoXCJZDpOjukg+c5WzxoAiL7P2VAov1w1NF0uxMNEAXJfOfBHIUcSi5DVw8ETaDQ889i/vufT8A4IboKPxWXJH60QgXJ2G52DKJCux+zOHcvZkunDBqQafxkP2z3Fiq0QIqBBP7bhjZIIwaD8qzTdZgnCTRew4vvkopEnqL0WJUIquuiF9ESmTotmhrRNlIOm43R7Wg98Jh8cXfgb8feNiFLi3nHJCt9PRfZw1n5OK6sBkRxVKaxTOyAAAgAElEQVQ
00VNDQ0NDwyCGRU+0mXqhANgfgG3NE7o2XEq2M/mHqQEV5jmUN6IM1WakOTGtRAt5jB6ng87wfdPzGYdikVLUiYsq9J6YFqaikdAFYk2FgjB5w7XHsLKrJ1O3xOAdQvVRfaF82UbN9FM5p8fWp+UBi2zYYtGckyGFtMS+QhJZ1ibNRu/Fnp+6RAl+rCgfTYxP7XppgNERWq2aD5BxN0rWwu4oRtm4J684ZeUV7yB28vQcr0SqinDkuFzMy8c+rSkhCr3H3qUyIQCOxg8GvbiDgztFfdGc0xAJmLXHwMD4UVKvsz4xwQXEa9t4oTHF095hFNvWxfhBh599Bvd89vMAgH17bwQA3HTnHQCAU88/CTcJyuA+mmYu2AUYL0mdgBhwmNtJsZMm3A6eOzESrTOdiHd4Tnj1TcpYApFzYLEf3WBUiHyJJuER051SJqI4KTo1hxHDd6+unMHaaqwvmpt2ow4LMeQ3KZ3tIqUzFW6RuCmdE1z8KSxH8fXF9+gxid/VZDLh8rSWmNESjwfP4+hTQV7ky6MFrEQ/DTcREZhPlP8k6SjXknnROIqGhoaGhkHM0FGIxNbL5h3OZRFB09t0pFPP565XlFL0kuuoeqxXa9mE9mhqmcXhaSu9EPKFAnag5rKckNByYB1FvGQ8MlG/xMvpvSjqSAluICl1iSuQoLubLF9V+h46kpzcSjuJe+kMROeRZ1AaeaHIWYws+Tm8s0lxZ4qhCtfzTwPzUXkyPqbCGZqpn5hRSlOGl8izpKx4+60jOPn6YQDAbQ8GTuKW28Lxldtux7ujGF/ofHS4O7+KUUwpmrQnS2wlEgFlXlwJz6z7lxtnmMpXmNyfSx9UjWT2Ks8p9SKA5BVxE1JSG46j1EVnvNEoHG03YoOMhZFeRsM5ikTrnCuabJSjKjmjSrTXjvs+WgwcxXg85hYvLsXcGkvROdA5jKNXtzbh1RGSNwNzKrPVx2ayj6faECUmSSbo9btZXC9ILK0qSnijV6c5IIp29b/Ez4sVe2XZlBIQIfEaiZxExCAbi96A8mfN10ZuhvpA+Bz0fE2v6XbwJunAoaK7fLFSIjBZGJUshBaaXs//dAVLowBIu6UPZTndm6xXlwHqTIdx7Hwfc4OP3RrGJ94CACxfOAkg+AYAwOiBh7Dr4eClffbZp0P5p7+PUUwU1Kk0BDGxHiv+LSvVTUHQGPQqiCYPCLd2I+KSEpoUKbdwvqqyP076VFHcx/5aO2JFfschORZYzEQLv1d9mPAGVHpmL8ZNYcuWLViJYchJLLW4uMCbEYkTaQM7f/Eixqtrs7u8QTTRU0NDQ0PDIGYEBQxHLcZgrtlpkiej4kzlmgGwifFgrhXUxFEpFRt+bIbJcJWqMvm/vriQ6nqJxY3/KvkVN1FTfXSSKCvI+zWsvBWjAZMlNYITBbthcZMKIshWEpYV5pxjW/taDFBGafjyekEDfS1Se7bCMcGAvGBNR/F1qO/CkRG1HMQIKYvFMXeMnOTw5B5sFq1bWjODRlFK+rJRG1tngEkMBT+xgSJ2/QRHng9xn3YtxUB6++8FANz5qU/j7eUg7lhZirmn3ziB/q3gb7HISvsezqTGDt6IpQDPHZAvgRdT6mpLN5OjmAGSKjL5LHS0mLFGD2nTw8c83uMoP7K2Q2fJjyKM6Whhgc910eyVwr8vLo7IshXb4thu37kDoxhD6lLkFPpJzwmOLl2KyY+iAnvt0iUWlfEIeV+I7jaKxlE0NDQ0NAximKMghZ0X6pH2LaviQAm0bNEV5340ca0p6xVtXAi8DQoVU9UzWzkM+ozbcF7sGJXg3Rfl4qVe5MGil9BhyeM86iQkt+UjuTxvNieqqdRsPhtA6CedpjX8L86D8ZQFXOwYmYX6ieJY+DuRrrBZMVslK4NoZiwGdHv+8r8qCQvu0VkKlR5jG/UdXn3upfDEPpz72F8LDngHb74RR1cjH3VL4DK6Wx7A+MQboSsxMZJO7iTKbDKhtbCRF7MxFlIIf546tXlfchCbQyHPx4ZJYitRcFuIniVArvlI0Ttj+d2urQmXQfPHZnNhdWUF1OfVGHfrzJl3MY5jSVyr6x1cjGS7GhMWMRfhfTHezKYAqXRgA2gcRUNDQ0PDIObKRxHSzeey72Frpjz2f0iaU5QarONKQ9IabqQWkY9znQXBrWT4NTPa7BoqzjHazkWn3GRTT5dS3TUHm6o+Ah46mZJuNyBOXGwpqnJPsEOcM2zRxKalnecyHFGT/J4m4qQmdTgYRw5xNW5V+kXHWmKmPKwC31fUFM7mVnze+8J81SiZtVh9E2Vu0I1SR0zNdZBBqFNcTBefSdYviYVaxRKq1vpURzgbMhdIv9QDPsjAFydkUbMNFy+Ec6++FHJV3P3mK6HMbe/D0iRY5titIaLs6J6PYOXl74X+nQ4Jg7Z0I3ZiIyc1StDpYHg8JBFRz5ZhxHno8cg5CD2HCZozrCOd2LUQIfnPcJd8Qz5zhvXeS34JnkLicEf9nPS9erc5lyTfxoon6yrP71YiDoujrPei2wl917ZcaVtnoZbsbRoGN4qukw+RG08NZk23OldZGHVDrlA4ousAyhpgM2rLFLBXDFPek0xMtVD36UfjWLcpXtvkER08s0nBHRf2iVGLfCzHOm3HXtgcNycxKS0n/Fx+CcZXRYYSCC4VFaQmtuw2DmVjG1Gy/ooG4FDRiVkoJX6qbRRX4HsxALoYC2nEqZYXMYlr9fmoLH3qL/4rAOAjd74PO7v9AICLJnhtn993C3DgdgDA+Fwwq+3MKhbJU5mC5cVge9qrP/e4Xg/0WG4U1XmiHjDNxkB/0bINKXKaiWwhHHnTcXIHskgXIT98TsA6NesygjCZv9TusksbRRM9NTQ0NDQMYk5ltlE7GrHSWhQSfyhFZs49pFRf7Wm5aKvhWoKOI8QerDr+E6d9TeNGBZFUnCAU0tt4VgrS0SnqjVh6qspaXxWt1bnWKUKmynmrYoxJnCaj8iSnOag192xZ/yo5wRNzXiCeD7+pOJzqs6LTWLTBzdTfS0rWGsxHTdcSAXmleLcxXhSJLKyZ8HjQrUefDyKoe597Fgfv/TAA4KwLIqi1HctYjLm1Lx0LIqq1lXcwiulRR47iHVGso56dGSlvuIXdkPvgPH2usWS1a2VOeuV1X3kv/MyEqOePoyydcUJOzQ85J5IXEW/WuAxqTiW23BVA4ygaGhoaGgYxw+FOdqqO5dGREsgUd1wQuSIVU8/9pVNVXCfQcXucqVyrEYCsnMh0WUqaayLX0RvlFqiIdwoD0lufXLOXQc7kyjlvZtCtKoKpy3RATOA5MThgU14AnnR5/LB46Eo+2So9B0d0dXLOcTTdCofNDTHTKUzdpUHdzQg9YrrTUQwtYc7zMzrSu1wM7Xnl6e/hI/eFEB67TdBfmKUl9LuDvmJlZzj6yXmMV4IT3jI59DlKBGTQE5u4ybGIaigVtLXBqp2TNpbRpPW7TrkSp0p3FefinGOxOsIznau68FbaSeuqFxZfGxwV02ODwzy4UZCHoPPyYIpjw0YXTprD1gv56gJS7ijlZ4
K2ZVyr4MRoWreqjBYAEgPFiyYV2zg1P8hB1/ayAbEFlwPH2DHkBU6iT+eq1kwkjrKcp9tXFsSATolP5SNTBVR55d6Q3mDlY+xUGdlQ4kbIi0THmx/W1FhB2hu7zuBvqFcfPUuy1JOYAKv0JYMWybFCFR28CSIkZ4LF0sSKXf8oWjiNoi/L0VdexU2vvQgAuPnBAwCApdUel3btAgCcu/Gm0PfzR2FWo6gpegJ05M3s1PJpRJm9GUpp9g+ZQ/Q0zRBCNgHKejh9owCcIqLK54gfWTkXjTKrE+V3PONq98mGwuOWUCVZ/dh8NNFTQ0NDQ8Mghs1jl2MUTVhEs3hMgok0eqb+IHGAKLS00eZ+cadXBr9JshrkHIawdqUC0kCYvlLBV8N8u2tJjqVuDrV25OV9+TvpX8ZjqnI1M2ufHXXBlKqm8S2fXfqJlKKItB2pts0Xf8O4V1SDTAk7FUU075QbUyMtUyiuE06BFLqc6zlyr70FhyPXTuMm7zP0OBO3EUVEpq7Q5h5oMQ0dM87XGKBLAqABtcRdKtOVUlLG9vdSjqKqGuvZ2cBn3AZg4FhcE8soj3lus/fIX4zQo9pwM84hN2EuZNLF79dYVW/0oI6Jp92FVRz+7lMAgL9y38Ohjm2LOLd/CwDgxN7AWfi3d8BdDHGL1uJiEbOqwnrHHtkSD6oTjix5P+lHMSUARHGC1gUDo37XFolStCefieP/iy+fvw0v3vlJrSXnWwox9ZWcG1DxpXiJk3XPZ9+jbrkK4l62y5fzmZ85ECON0DiKhoaGhoZBDHIUC0ty2YnYFYDIkfsxJBVgL0nEeefr9Q6Y71pKVleIAod3uDyRyUY4ilr609k1TN+9q8/IqdP0YjhUqNqkWPXc5ut3NAMy7BxZUkq5EtbAKC+ySJf0ooyNeW8iL0BRWolzIorbwllVH4KCO+dIg44iNVU1Op/vOqW3uVy/U1wJKbV7LWPndsT7rKJbdQrXzCDEezVEmWe794ZT1SYpcFOGOnj55t7a6lXkicSM8ehiilL2t8MIxElQvC9vw1U/Bk6+chgAMH47ONfdft8HcWE1rBHH9u4BAJzdtQfjk8thbKJ57LiPCY/gQVFjmXMyov+k8Uj1mShgil/ajHUTvoeB2GJE8dsp352sCvNxFOU1eXbyHdK5LIZezRYgfEvxPfOYVriZdSiHhjeKhRgszHv0tOCzEo2yeXlQ52xMmTUxACYkXoofvw+lQx300SM56sZfMzH2Gi4LycbIc1uJOPr0nFE+Dd5k4iurPjtaNDthx2Xxk/lDYlD+31gV9mAGETLNF8OKYID8iIwB+kLER8SRg4/9JCsi30MWft4oJMBcLiXR7iNe+6hUPcrjVZ99q74sFBYT2vT0JpzlT48d6GCwcuZdAMCrTz8DAPjMAw9h754gctp34AYAwIXde7G6vBMAsBqT69DL7uCVRZ2EZs/FL95LOPKNQESpfCL8uxna801CbglVD70jY1FkDQwFkZ8sQv9scDib6KmhoaGhYRAz/Cgkvg7tUKLLi1SL9ehIHGWUos/LTyCEyt3wttZw/cErNpgV717iOJEBhGPilYMCMjU98aKsV0H8xBI3nlPiKE6WFJ/tvIRjJqyXs/DKTNFoKi/+ZEV3olwUsRhdyRP1+F6MAeDT8uE5GWsw4zOqUqLZOWOshIuPJrBG949+xHcwMiNW3r76/A8AAB88+ga2HfggAGD3nmAye3zPDVjdE2JCTVaCv8WY2Effw0ZBl0m4nvI9rDeek5hnl6KWa4iBSGCMyJTq74x/8TkJTqg6xWI3kdTU0iBXn09VzAjA1ziKhoaGhoZBDHIURNF5Z4XCyCgfYzx73LJSyhpOyi5Ho2ID5SZh6vdAfJaG6wjMUToxERVyXGTQklOUb5KwzTIXmEOwoqwWOXbkdnXaVdYhkPLUc/RawryyanHoU7638d6RN5hwlNls7nqlvI1cj/MIWnEEmT0Q4lx1cku4pjWZVE7FVMudWr33wiFkSu3gFJhS2t6DjVCofotO/SYFfjRa6ccYxbDhp4++CQB4/vHv4oHPhyRG23dtBwDsPXgzzr91BAAwOXs2HoOuwvgVLOQ6CtgqNZvrW2a9qzQSANVxjbISCnl63tRMu7IW1sJaNB1FQ0NDQ8N7jUGOQnZpSTiSW3YE5Du3Z0enJIHRtb/Bvze4DiifeZEbmlQdjzwkDD8Rwb04p5GoX7gIwBHXOhYLJ+IQRKfh+V4xdDHpiaSt66U+ldwbUi8Z4DLBT/0w0jYdN030FuCjidc7It34yzQ8WGxy7rTF13pareGVs6Ap/jIXw2ZKXriztaBnePXpp3HXj/8MAGDnjmAeu2f/fry9YzcAYBzPTc6fCcfxCsdAssIyTG3h+jB9PErT4Cv/vdV0DleivmAltSmPGMSw6Ik/BsPBykSDWDPd4uJK+R2nnzXskV3bc4a8n8UbsjZQQz2YT6mzGUie4yvnquUzdjP3kNa/K/tyLctZVWnFdZXlqkmmTFluCEP9TLw+ST/qvJhpqnPsdd2T1y5tFEY2CvYlMGqjCMceTtquCRQEJWCfJT3q1htt0KOIqeSNZsvTxdVYiZVFScAMUIyH94Bn2VOm6Lasa06MgTkKQlVsk4l2vQQb5JZ6tVGwrlx8GrhtlvwGDAf3owx9J4+8icM/CBnu7v7opwEAu7YvY9f+oMxeOfkOAKDfGry33dlzas7G9jvHXveZc1HR3tC3KfONl6pShJMjTaI2H7FQGgOsZx1JxUvV77vyLJGsliazqH2jWr9dWfc2svY10VNDQ0NDwyAGOQohXQ0rzzS7HI6ecyFLulSJ4qnZvsKETekFq6aIDdcvmFVQ3AO/fxXPhk4lHFP67q3xcGxiSWUAS7GjKBw5PCtyTQw9S9Oq9z1sfK5VnMSQiKCM+hkfrLpnIZFcKanXKLa/NypqK4fhlQYTF9El1H5GcXv9rRG3YVhxnXKyXt1Zp5aTeF4m/ZiNbgc5KlJMK+NZLtaRV/rqJbz85J8CAO77YAhBfnDPFpy+KTjfnT9+DACwFj21zcoCzGrajsRoWVHN0yj3WdzAkGL8esY0jqY6BtA8TFNmNzQ0NDRcBQzrKJgZ8BKBJOMUvMeU3Yp2PpEtZpZgud6s4UcUQj3a4pyK5CBy5j6jjK2lyB1sTus74WDdRCl7WSFOlHA074QpyCJf0VHMK8dNijET4JNnq1Oc3Ch9BisCkGalkEud15xEHEdjJDozWwtrRXt6zmCabJ1isyldhaJKAWCS1BGLTcKPxZHB2SMhBeqZN2Ouivs/ghv2BWX2u/v2heMr0+XqxnaK6pW1pcY1AJjKHWiHxvwZhYXF9YKB95eGPUm5XKPunXdtnTXvBzeKvpfJm+fFlgQrKPIZa9GTOqubVZzbbCuBhvcYapIzSFwSVqR4ksQekIUibgY0q5zx5eI6cey/QP4InRFFck9Z5+JtnbHIxaG9jjiglKDTFKIGplhsvC8VqBKYzklsHkUcGfaB0H4MmVc3tdUZjnzA12DVppfl9Ua5UcCXiYuCKDhuFOoc/+I+jeJ9E
1agU2Bw0/ds0fTiU98FANxw2924Yf9eAMCFm4K39pGlEGh8xcg40HOsspzQi+GQ6In7mfQp3Uw1rlfRU83gpGqco5TeQPx3nUr35pnd0NDQ0LAhzPCjEPESp6zMEokkKVFqlAlVZjzz4V6zuqBdT0RUVDynDkxCiV5FDDFH660qUzjWrs289zoKrRtESqk4IxES6C7n3sakiJ4AJPzsKE2lgUQEoKRGE8V5sMd3PFotI6UWOClgajRTPotNcSU7nT7SiP8xmZQmpFkn3xKJ1jjcOImxOvFh4k/QeekzeaV7FUkhM4Ut2hvbWJ9F2bepnklc18jGVKeux0Is9+YrQQR17uRx7LsrpEUlzmLn7iCKWju5gD6yR5bEi+NexoHWhwoFnXswh2vIriGLhJuJnpTvi8yBchRM7mE/L+ryyBk3DSwuif9HhUOfWVedw6rf2TiKhoaGhoYNYC4dhdY5uEnKUTjn2JlHU8SO81DQ0Us5U+7mtb0wdzkMurZcDjzvzo2ijbre6TfIM5jOqDgZztq3hZMQLk2uTW+bJKsRue3Qw4ad9Wo9NZjd+vWDauw1laUppJwaNBUBdvy/Hxn4vpdyCFyE9tIGQk8oRpHEdYrzdtRLxQlxn7MDRnFA6buyKlWoV2Nqc27Eyw+bULixajaVjRR6J/VxmiWdf4ljTfXxmkfMUMqJwUK/o+I++85g1Fzg7poyiq2Reyz3U3lSR5amJyUSPHMGZ08cBwC8+tILuPGO+wAAN0bHu7vvCxFmz5x4GysxWZK5uAoA2Drx6Jn903lIrPqtiXXFPSRzK56jo1UcBdtgy4tnHWvy+uVboxtz50guqXVClTJ+QDeQcEtcLJ3L4fG8MKh70yRWSj2jGqDqZWMRzWWma7g6NRUzNgppKOdmZvZW2MRcSaiVUX38wBMP4AobOYRqONzrR/oCQFsrCJxLx+0vI2rKNvqIRezhJIQciWh6j54WXBWOnNeCCVn0xEsARnG2G8rFbVRojYiu4mMxpeVzlKndpbOOy/fCET7isRtF8c44+JEAgB2JH0M/pkVVrZAmXy2z1WKTYWBgJrFNsW2HX3gBj/zkXwUAbNkeNordt94Z/n/f/eguBbHU6muvAQAuvX0coyxrW0D6TdSU24Ne0moBFRHV+ojKawVpn8tzNcg6mW9+89eh0URPDQ0NDQ2DGOYoxlqMQcfZprB93zMnQaazaUyh2TtZLTZJzQv1eiEOxLpTC6uuTwrnSqDGVXE8I4jIjk22rVGUs9wjfjtputHOgM1MWWzaexYb0bneOeYqCi/YSgKv5OFzMIYGopAn8ZiDMoHNjEWsle+Lk4ZZw8pvEQGnicaSBpkyJHv5G1x2yhXRBfNk7rBggulrH+NznX/nHRw/fAgAcMPdW0O7d4fggHd/6qfwdoz/9O7CNgDApQvfw2glmNiiX431SpIz7Uge/p8xyL78bUxal3daqFHjaHOxdom5OZsNIHcDCefycam/Tz5PUovEFYHq1XNiuO2No2hoaGhoGMQgRzFek2ivsvvMx1E4V1IwLHumK3NuU1oRPKiXvYbBSZuUgjnX2fxl1lUAZf/5bTvhvlw6ZPE3zY8sD1KsBQBgDRxFcFVUmeWkW8QpGKmjZjBRoeRKk+eByWkMy4tH8TixotPj+E9ct5G+Rr0LOq90hfGa1ZRtvEZjZX0e1ilrkupLRsZWYwrJF4xJ1FGQBAHnz+KZ7/w3AMA9sY3b73oYAHDDzttxaXswlb14LHAWl3a8gsmlkOCITYgTwQFRxLR2aMpY2lYd+8xbW8bKq7GimGCGSw5Fqp0nzSzfPAMz9QxawZ1xmumtWkqh/wezAsbrd59z3Vq6UUfjKBoaGhoaBjHIUazFBCVhZ40nTSb/RIUSrNhdaUsoMfmcFyKL5nuuU7/8mmpFR31s0BQ6mTAKOcQpRVWiI0fRYw3EcS21pgUmQD9KB98asOVU1ynZ7xTquxbCY16YlAVK6u8guV+MahsAjDrDJsbElTrjhIsiyy/rJHIu62kUhc5E5yboxeQj5HhZozh+/eoFHPrBkwCAhd0h1tMjDzwKAFhdXMTSUtBbXFwN6VG75WXpbLSy9MYrxznpwhCqvZrSVe/18jG95mquliukj5gFP6UzNa5nGkqdh/RlVh2DG8Vk0pcnjdhQDzeqHMicRRsa9GRzqtW7mS8qa0/4LaO6nkkxLSlJ3m4D7e0+nc3XdRXtqLHoU+7lG4qNyiOPM3Q1MTRBk8CTtGmQcs4Ak0kmFoAplLFqTcN4TIYVob8LC5br6zlmkmFlc58tuCKKUqJEHYfK5x9xRUkYnBXCdSUTsSLTisWofqfELbT7eVj26o5HpeDmjVO1VWfkA8I4zhNfTaIBoPzmjIGPbXJ2Es/1mKxeAAC89dIPAACrJ0KO7R1btmNp7RwAYOdiWHrOOQ8bTYER36dT7TSVDW0oORe3W5/MqDOjfWUq36vV5eZ4dg3Da5sSmQ0q0/U99IPWDFuUrfmayAfvleg/7/vsNa6JnhoaGhoaBjEjcVEFOgzmIDKm0VTOFUcNWygTg+KcqiMtTdvrftRhvLCXnGPbKYo5iix6CyYHmUAaK3FGxoxaa7WzLl8j81KTOeM5OFj2GJY5WaSiZEZRy3zkXI1i7JhrSbmTsZvws9hs1/ZCsbLozKjnBgq95zIWPvvUPFzBSWul8HzRnD1zFDaK9dYmHl0ct3ePBk7itacfAwA8sHc/DowC53F+V0hmtNJZFZaLOC0tG5pX+JQ3LZEvZXUYTBNLXUtIxWPZeNC/rhQbTRMlyZxdf6fbKtvQ0NDQMIhBjkLLZAnrt+BUCrWpHEVdR4FCltbwo4Z53q2OBEqxjRLKnIis3nCE0z6n5DuhvilGlDVKdp/Mycg1UIRTFlpbiDFHPFWj3hLVk88u6fLSRqLgKdcDOeN1iquh5EceFr2LhiY26lacYeaanQFj6lLjPMieVqee9W6j35XHKLZpjbmwBXRRfm7XAq/w6ne/DQC45/77cPPNIYTH5JaQLvXEgf04/fbrAIBFjizbQywO0jhX62EscnPh/HxenyRwmk1xJxFra3qGTdH3+cpvOpJExSDX43gvjqHGiP6R+52k5UWhW6th/aKn9wBVRct1avXUcHkovGXVIkcWTtZ49CaNGyRWTx4uLrpkRWR7V3roGvGtKBx0zbAoKded6nbozUHEOrJJTdswld5VHUtlNgxUIDra4KR8rpA3qsLLJsSMR0e2+NGPwthF9MGgCYtxw7jwdggY+PoPvo8P3hIsoXbGLHh777oD7x76YahjcjEcnRFRR2YBV4dnP4vMjTmeyl7kdbJ2zBNrTG8mrKyuKvtrdcm4zBJHNdFTQ0NDQ8MgZnAUsmtd7VSlNdMxLY7icrNrmuNMBVeV6HivRWvv3fPnnU8SMl2iprIQiBXcBpjYrFz815axxkIkahJPkJLacJ5tOmeNYtWZcBVqjJTfEm5ciYtyRSMMG2L0atwlVL+J/8fyVkQLxkmfRqMs2JN3/FNiPtFDjdKFUiGDzFYEcB5uwMy6hEffRxEYew5bdN0o
tjfWNVkDAPzgie/izo8FL+1t+24GANx2/704/vxTAIDz50+FaicS5tzGhvfKtJPDp9M71gmoxAaWW5mbjeq1RScBq+WjzpcBfhcVn5rk3AbWyeGlpyZdiQdSVjvdfi3mzLjsiIqEtEDjKBoaGhoaBjFDma0VIXRu+n43K2a8KB9zBy9dpygLJVKtL8+JAHaoC4Pt5FpzS7wERpNmqjn5FqwUZ5Vu5XJS7zWVHA7WGb4tEYsjUCsub4e6LgrUWmemk+GMLuYAACAASURBVAs+NCS2Pb33veZzBK76mk3+WnrLEWeFCVDBbpD3T0a657lm1TikD+jhxCkr6juCvjjlAtJ5QlQsXRHKzjk9r+mWVAHrreSqoCRIFkYSFkVfNec8k32sv6A6Oi85cCBHW0mMw18mNYCSl6VaX+6LOPfRWK3C+RAFdkzlopL6xPETOPLCYQDAB37q7lDmpv244+HAZTz3zjEAwOT0GkZRWW/ZxzDm57Cd5CHxQRmy4Mesn3WWYjdZftvWTqeHk6RT9B2q9ye6nayQ19S6XDPJ3EL9I2LHPlNc186Abo4vUPetUFaHGuNFvSZTG70UmcEyzNgoZJLXvf7SBsqgznYJv1wUntmXsVHkd1yLVlWlqmqzkCpqw0aRlRh419ciyHrHqYxkJN5xlPSn93BxUSVrJmc9L3QSQE/ukW2F6nQwXRq+vJ5RsPKhQj7OOj1iktKaiMjnpzUG3qYbkLUGtqN70oWgGxku5/h76dXOlh/Vb7Up5P3MhWpUjjOFUzviZtNZg+e//zQA4P6f+HEAwJ7tO3DP/fcDAE4eehUAcOSZM5isRrHSOGwYizHr1Ng5NfaqTdzONJe4bi+3dMaaUSd46d7BW6866v4SlU2hFiViHd93Ez01NDQ0NAxiBkdBR6OIpOk71GZS5mno23KHX1899XPXDh+RUvnJlSqVuglP1ErLomqTHK4tVHitiriNKWhSqPaeU6BKImERqJDtf+D8SVzFtYV/dR5mFS2Auf8K5S8KRBFFzKIC9f89PPePuSVrOVw9icCMleREuf+TMVYFPRTRrs24Eg8RdxiKJcQKUglVrlGc8obFwlmsP1jX48SbRwAAxw69DAC446GHsXtvMJm9+f2Bs3jr8Ctw42hjSzGkXBBndRjBxCDsLJ41Hf9XMwNdL0dRu08C6s23Fm1G0MB56qhzCroAXyzE3y0VakNDQ0PDpmEujkInT6kpta9E6N16VETdttk6imtR93CtQOuTTHbuMkLBXGHUFPO2PKeYI+GYSE4OOIo2q00ixzR3o+fyBOg7ZIgUsjWswGTdh62ENCdlNUQRvZkIaYLpt8SeEo4DfI7alStljQFHmxWnNqHI+TsnU0sruqA0+ihH3OKKhfMIP0bMWvRYuxgiy7701PcAALfffRe2bg2pUm+4804AwK6bb8E77wZT2eWlkGoVq5Gj8BN5B/zarRr8jWv3ricdxcw1N9G3pWVrTqDT0DiKhoaGhoZBzMVRYMDF+2pT7evhWgLlVepPKB7Q+pyLrhx8/kuL3zdZR2Ey0+QaRyEWOtcKTOW3A7WwZjacz1cDgFUUbCKjnPAm4WgVtY5FGqtw7DqTcGJAoGrLfBUytj4rL3Y560fiEMZcT7Rs6sCOVmR15NjxyoszFucxcBx1V9SPpdVTMieo4b0uR/3T1mC9vsQmtst2hMla4AwO/yDkqnjr4Ydw8MPRAurgQQDAXR98EKeOhPhPq2dPAgCWYlUj41Tujhgl14yURXrNguvykDr9rq++9eoXLhfT8lkUXJEBryHaIS/egVkKycGNQk8gmmgUZrnvXVF+3Yl31LX8qTqHbb3s+gY5MWPM2qvbnSbLiaWzIG56ca1n96uNw/QXweaXqshwAqJ5aq0jj0tU68uVMo+93A9j9n0iLtHfBSAhyIOSNfvojcqxbVSyIl4wo8gphtDuOxSbqrWaGOEWxzLSQt7L4EE+5RQ23PmBEHJmymKQBfTrrAVGodyEWh9lYqEsBe0TkRkphSnftbfqjceBYVNh2ZfhaZNyEO9rOioDYJv1ysBjIbbj0pnTAIBXnnsWBx/8EABgy5YQevzgLbdi7623AADefSWIqlxMoub7np9JAxzSPtHz82Ti4muQf+85eO5k5VNMX59q4vghbIaovqbMnt6/VDRZ97mqo4meGhoaGhoGMchR2Iomjtnbgd1QO9xd7RhR68F8Ia619yMpRtcbE6dhQ/AGpW2mUkgnU7HCESJQxBSpWkKQi/Nbb0ougG8grW8nybTYuMN5gBzdMvsKnWgrbV/kUJxLzialPCnNpeL1SiE55pPRYafVd5mFmzbecB+SFKgg5TdxD+C6hCb1fGST2nhtRKHCe8emxH4txH9689CrOP5mEDPtuj14a++98Ubcdd9DAIDnjgfOYxKNDrB2CcaN03EwNaPYEkNmydNEOMNSkIE17lrTelc869NLTZnd0NDQ0LABzIgeW+oO8k15etq9dJdNEn1Iocoza4rLqw9t6ijy4LI9OcXROIzNRyDeRAEdz/J1rXAslNg8D41KesQ1K7ltpOCVpSUnOorvv594ThREMv9AtZPcPz7L6PlSttdW0vcWHEVRQs8tAyiuaNpd3LdOcQYkEbCaM1DVSsircIqjtoJDn+g+0a0UYdeJlkB0gTHWk3cTdDEEykIsf/r423j9xecAAB86eCMAYMeO7bj9nnsAAEdfew0A8PbZc6GyvseIEkpRmBaUqDkHEoY4Bbpe1HetMQhzY0inq+fQBpTZ9YcMs2pFU6pOEHn91w6KOChe5VA28tHkgcacq03Xhk1HsWEA6SRPN4M0KGUsTbGheiVy0inYsykgNIMOZx1zRdcCsHFTfP37M+l8Cq3ONriBQHYeNfGTtshCbJso3El6xkcP0G6QeOlTkD8uSPU79tY2USTnId7gLIZJbPjC80nE5uDVphR+rF1aweGXwkbxvgc/CADYf/M92HcgbBq3vu/9AICTbwSPbu8cx47yFdGdmZNQS/pcXuVf84ie6rgedhZtFDC8fjXRU0NDQ0PDIIb9KJiqkd1GUgZP5yhMooCrcBn8Y7ro6WqKcAbjpRhTKLPhvQoRnSoEryXUhBPXI6pzwQPQitnwC/n7kLDdBrkXsfGeOZR+oigqlj1JkiQA8NZw/myn4kZxaHNDdqMiIjI2b7tHzRi2KMUmqyJm4rhVMMofohSL1uJGUbspwqzzSqykFLrsfZ0z1l7lC1cTS0JV1zTtJIqL/ylTYkpM1BmDtw+HqLFvvhxSot5w070YbQupUnfdHkRQu+4OHMXam6+hf+coAGB88TyPy7ycRNq5Omchnu1DovHr/asC9JyZtd42jqKhoaGhYRDDDnccZVXvtkTxa5kWyXk1ZR6PWV2znlUIiKffEI8xqbupU1Rl4nuRG3PMGiPRKKULyvCvIPests1LLhmIxzefcyrxk4oPL0SYcColhschH9dOj1+ljeyYpO7jBClFM4bf2ZWK7ZWfs9YIh6B0DuwN7PS8S2kfVrY6z3NFchd4+MhJmI5OGriYcYdk8o5yHFhwPgq
KROucYac9z7L7gK6rUe0envQbFR0FHSXarFYO0yBYeJdyudrRk8bAqdSpBZcBqKi3sXzv1PiSIl9xBWQYYFiRAfa79TKHaA5a7lOsa2SZEyKubuQ9xpdWAADHXjkEADj34Bmc3xG8tM/tvgEAsPPRTwAAzixuh7kUXAr7i5Qg6ZJwf6S36CxsdCik9yfT3IA9CuXDlDXLSLCv0gFXo3Tk42Huy28jT4iVOu2JviAPgjEv71KX3sh6UH6v83NFMzYKfsY1glJpyf9dhiXDtde/zYOeguu+V2SDm9OYawKVEfEiamEP5N6qqOVp7mwYhzFZPZmYF9pa3mT4k1d+BJyVzspiQgu0Uz5JdK/jhVplLqNjkqWsnP9lmJ2hRU6BF8gyRElqxxI3biP9Zasy2qQGnufVssWh0+HRrYRF++jzLwEAXrz1Cez60I8BAHbs3R+KL4TxXjx/EW8fOhzuHR0HACy4NfTRC122T/2sSncrxgBcPDGAyIi+GcRR6Z1fq3f6/bXvdtq5q4kmempoaGhoGMQM89gIbzFomJzhSqZCnQfV9JTV65u/L+t4UXO70L4HuJY95q8MtDmjEv9lxhPOedHZUspUOmEdTAykZCNn0XnL4cvNKBPVeoicC5TLWcS2EoOIm6S4AjWHM3oumWPJOfkdjtNHA0aLJURsVFDMJLXpRTSTNFGZjNOzDdbxfXmLzi4AAC6cOgsAePmpJ3H/npDMaN/eEIK82xrCAi4fvAlvL+8AAPRRjNU5xyKnruKjUj7T8eDoHOUshap8G0PnuNqKVGPaOamjfI88pvQ+zXvP2zeOoqGhoaFhEMMchSYM1pnNZr36gvVCdngJm113ohkyZ0uVuBuBfnbBUfhNesgVwLAJ4PoU6dc2alxmGY7ZG8BwTKhMsdsJ99CT1/YIwES/Z4GDh40RXTkclPEqeicp6yGKYhBnoypim3QyiUXibEZ1eZ9SvYOxjTTdzx564Yr8BfrI/XhrRDFPMZ/gxfiDp7yVtqW6WyQOiOrTM32M0hvrP/vGYZx48SkAwMF77gQA7Nl5AACwsGsn9txxBwBg5USIEeVPnsaIKyZFtBXDkSzSrq/YeShCvnCwDddLnUOOaTqN8pxqS0WncSUSwW0UjaNoaGhoaBjEfDqKaxg6F0BNNVAk4/FemUdeOzt2w9VHQkWSubdTtFO8RrlXbK90dcRFwIEMoSjsBZuQA5LDh8NpgNkFyaFgRBdQdTglSyTd9tk6Cm31JDGt2MSpbhXHn0ssryLF+ozCNUaE52JCr2XsKcfug4dePEdtszBxzCm0yvj8u3jzhcBR3PSB+wAAN3wo6CrWtm3BwQ88AAA4c/wQAODsuSOwq9FMPibLMF5/3Tmrp/U/uo2pEkaPG5dP9Bj52qLKc/9m6Tmm6yiuJcwwjy0bbApv2LpSp6yrknVJXVsvm5V7RmvTvnkhXJ/n/4ekKXMnE8o2JwPlya0+5vVKbqZN2o2iHLdS9DQrjtdGxVCDbH5149+sZ8Y6KN8NknUklIvjMOkdCqNV79CNUi9pMn81nWGfAx83FtcZWDKnjd+Xc14S/xREjFELtBafpW0M4hKTXNPj6LJOWWvQ02anwo1zEi0qTn2BYSW8Fm2Rr4kEbxDxUkGI+bQPVAf1jxIi9RjjzPHgff3Sk38BANh3170AgOVdt2D3HTcBAG59OIQif+HYqxiffAcAMLqkcndHk9kR+ZXQhm+VZxjLAacIp9cxx1KxUa1Ebf3g5nIdamGadlvyzCHoJXwj30sTPTU0NDQ0DGLdoqe5I8RuEPPvfvMpXtPKp1Qxb1v8vO279pS9872rlDLV911fCuxZ0OKDMnpmYW7qVEZmJaVwVEekiEmKFQLRuuQGW0nJGhzuvDqTiZnKlqE2aecx+2bOxStz14yq1Q8VJb8KyV4rmByntEMxcHzKeI4MwI6IHujXgh3ykRdfBgAcfSnEgbrlYzdiae8WAMCBBwKX8fbRh/DWE4+FeychHHkHw6lYiZMgH3f91uuY9xspOXwZ09p3UnKGeXmdmnieeNRacrVOe6N1oXEUDQ0NDQ2DmC+EBzauX9lsrqN0jkmeFs9N0X34lHqbV0ehKqhU+aNEaQfMG8LgR6PvSieQXWHOwsmbp+ix1qhYWSTfJ5n/xAjF16k5RicpblRiHpu3RxTc6bzLZf369/RrNaR6Hz5ZlPEV/SRT6ZoDYk+x/EmVhhjA+ejFSPocY9H5sDSNTwcnvNee/B4A4MAd92DL7bcBANyBvQCA2x/8KC6+E1Kmnj33g3CtX8NCrK83FFurfHwWT1gaNRM6BtwQpzULpc5G572ZB9xqUrddAc5irqCASdyZXNFSwex4KPMN4jzlagryIc9s72tBntfZDj/d8zxRUl+DfhTrET1Na/N64t9cb5uIVwpd8UuISudeEyjhR9+LJzLrf5XrsgTGo6OEGWcRlbW8kcinJtrkalhyXxNMlB7c+W99pPhTEnRQbWykmKeAgc6xxRT5J8ChSkGWIqoaxSnnyMudnumMxSj2ZbQWzh37QUhudOTep/D+20PAwPG27QCAvbe/H7feH+I+HT4WlNrnjx8Vi6K4IVuQMt4VgSYMLMtu5hHhTPe4rm3qxd1TzyUK8TnWjNqnZ/zmi6Ga6KmhoaGhYRDziZ4uw3z1SmMe78V5zW4lnHoTPeWYN+7NNQ9TVwDLdU0F52am6kjuFr3cplTc8aC5CHqmRBAo0ocaL+JPtu4Ue906dbgekeAUzhDpZeNVfNdYH0XVTapQivyNw8PG8OzE2ZhuBDcOv7dEC4HJuZCk6LVnnsbBRx4GACzeGnwslnbvxS33Bd+KC4dfBABcPH8SfjWEIcckviwSCfZOZdRl+98pIr6BllfXoHXLK4o6N7rWGigxFNW7wXfVOIqGhoaGhkHMFevJO+0cVjN5y87VEhglQkGSk2YPSs4pylWJY/NbfErEZYUqhnB+Ol2pd+JUmZhVYURqTOVtpTxTKF6SFLF8V7WD5d6U3hJeyg3AmOmUgpaF5+fTOuYjNTjKpi/l3Sb8k5WPxwEl67TnF7LfRMU7Q/9l6uW0V/G0qSztzWTE7EldJpUBHDvQUXnOKeEMaxbJmSzMSJKdyziScxhHlO1UWtVMoG68YQ9kFeqs6JCORMtmsVyJculTc7IwX1WDlesejAE8Ow8KCZuPPc99p8dPPmDS6XBrncTSmrg1AKKIPvHiSzj5/aCvuG1HSGq0fc8tWL3pZgDAngcfAQCceuco+mMxFlR0LJzQ3LAeC31QoI/iEtj7BXgsxpatqDbOoSfNPTQBNpWucRspR26ya+r7MrqOtJzwiuXaacz6uf5ZwcHnEj0ZTB+ucC27OoV9klPUCaVGG7Beqjaq6vVZu2+OBfeyJCjqrQDJwq7HTUpnHzvKCSQ8vS5Xv78oMAdmTZryHahJWLH9ZjjFtBfP2GSR5bzVDU2PQehNKT3l1WKciKN8es7o8WM3ChJ7GB5AHjUHXljYgZ9lUHqu0GIsFIKvzaM5LdOkf3oTnT5SecSBGtXlraov3+DUepcQhGQ1REcvBg
J9jIHioy/E5NI5vPH88wCAG+7/IABgtGs/thwIYcl3fyB4a+9/8xCOnj4RHrAWdtNJP47tWMVCVKD3k3BuYhbQR3Gf5cbNt1Ek/c/6NO1qea68Vie94n9qV5jrm7usNU7QRE8NDQ0NDYOYP8x4AaGiysBWQtSkIqvUpO9qKErnjc80X13l/aIIp4tl+U2mqa88NOeXi5RMpV/WMCk8PW7UlcNc4itf54tnhY1O6tDUHhH3Timnec6H/60y3CfltIFhMWVPJrFGiS6jmMuSl7cxbKLKdQHK67g27imVqunieb+HoVDl0ifhjooG6iOXl3NJe7OYXqayppBp6wger/8wcBQ3vvgCAOCOg7dief9uAIDbE3wrdj74IRw/9S4A4N1DxwAA3TiIm8z54xivnAr1jS4CANb8GruGK8n5VBHwekz8a4FJNwtDrgCbicZRNDQ0NDQMYi6HO61PK5RRAAqFDIyY+amtiJRrJqujFvl15ua4kd0z13PMaTsmyjZVvlDPqCT3FQ7kusOAl2hCwyZKVX2tnojlao9JeHSmZJ2DE9HnPFxh1OG94UQ3YmKLeM3xXLCRJgtRnehcLKcTBrETnHhtO6TjJ5GhpEupEUg8B3WtMk/VHVPOSy01IxNAPp2Eh5nCUcCo8mrJcNwHSczEVfANEghq9UI0lX0uKLVvefAjGO3cDwBY3hs4i/N33YNtq9Ez+7bg3e1PnAEAXHzlOVw6Gu7dQqqj8UV0XVCc+0knzZ+i65qlSk2EJzZdb0ySrrUmtpnNecz6fjY7+VHjKBoaGhoaBjHIUciurmTwgzuU7ISlBc08sRCvPDZCydbv9epvfoU4rOsfCd2jjHWAVKZbdNZfHV1UXu9m3Fevq9THMeNNJktWyOUiPalT3xVT4x5MbdJnRjqfTnI+cHljuKBzRKVqWXXNIkraG58AcEiLIR1FcSlFqYwsaeQkjlXlWXRkYynLJ0k/QzoK5z1GLugaTr12GABw/MUXcfPOYCq7dEOIA9XvvhG4f1eo75aoMzoROIstu/fjUiSRLx4NMaQ6M5b8s3YU21HpLrV1PfNWbLaTe2tmryntvk6Lq8r3tVkYVmaT7fXMtqcnk2ByelBYkUXHyBr2vRq8yPYZScSSD7A6lT06b1xFwef1IIr4YEYH4yUztQjHB4JXZr/0EevxiIfaojn9yZeFeSbLkBgBSP1DpGDaUi2Jy/1QpnkMrzchTO13DdNY7vk3grIuuaF+DycFitcsi107yX2tprIIjsgc04hXcv6NQIXLVo+2lLeaMqkl4ttySSbxlU6sU/PcnhbHa+oiRH22krxJqqXNUcVdz0xhtSDN6GGhzZHyhPMCOkEXPbkvnYrxnZ7+Hm68+U4AwPLWIILavuMAuh1LAIDVtfCs0c1hg7FbtmKhD2KmS2tBqW1OrWFhvJKOhjU86I5SFVJ7kjFAASEGps+xNOhmhXCqrXtFHK16vYTNIsSa6KmhoaGhYRAzzGMr7GdOZBkdbterc0MUXZ1qmV7vdCS1FuVLEZhm9mok/NAz63v49SNYuhx2NPd0nho1N3/WnG3ZTNHTMNdwFZTn4kob/vVq3BLJK1H8dDQsOuLPRsW/5ltrYb7pkdYWY8m0qqJcKTe4tWUsgQ3Zh9ROVhz0CpmA8ZJjXHFfJh8IVgAbmD5S9y7Ecjpx6BUce/ZZAMDBXcFDe2l5L+z2kODI7QjHC9ujie34dthxUIgvnnsLANA/ewH9qTcBAJ0NnEfvetgo4bCcVSlyGDO+JVOjwTNOq3bR+8v7ToHNFzdpNI6ioaGhoWEQc6VCTeKbVxQzpWt/KgvlcvG6JHqPSiZFDdWc2mr1F7RKJWyIloXX5HYSq6jDPKhRXDq+iuD64TJq0ErIQtaPOvdQcAgcv6ee/OhydRRD54auX04d5Q2oy6PnMe2umFSL0lmU2Z6ixpKS2opCVzgED09xoqha59UcZDYjllfcSzL3RcGeIzcDnoYqv5YpuHl81Phx0FYHOOKZot7FeqOYEaJlyVx4xKF7FyNlvnriBA4//l0AwPYDtwMAtu2/HTu2B+e7LaPwfY9jXeMDB7BoQviPpYuBs5hcWsH4YuBQFleOx2f2ogNiY4TY1s4Mhv6RdaGm44sHpYZN1046VvSYU3RIVxrrzpktb5r+rVgxJN6CsZj+TqqLa/aUZGGX6cjKnIq4q/78oeu1BtSUr8P/63a85xjwe5h9bzjUrGX0a6/GsZlmt11hpTcSSnk9ntTpuc19P/Mla5KVURNMImaIhFMPXq2pHHtmu8RrIpwzovz2VtWfj7MRMZPEBuKrcymzZ86kbL755JKIl6jdInGS+cIWW3TOq6CY8TChmruOn8ItW1vDu2+/AQB453Dwjzj4gY9gZ/St2DlaCHXEAIYXty5jsu9AqO7294e63n4H7o2Qn3v8ZtgoFowVH4/Mos0AVferXKxd81eprYVqQa2ui5vtF7FeNNFTQ0NDQ8Mg5hM9QSj4vnDxNJkhPWCMVqwR9WT5d57uMcTLCTtlamaXU0GmOGcq1FCNe2Dv2aydodAs+9+U/UyuZCK2HwkQFWUMJLKnQPxDcnVoSX9uhuhpZnPXKVa6bDFURfQ0bIqouYfyHM3JOgUpntl9zUyXxFZsimvLPthK20hxrIN2efKnKNsxa1i0hzU1LuWeVCVG1ae+c6a0eTxEjOZ0e+NjPInUiNzvJ1i7GHwk3nrpGQDA4ovPYPuuwFEcXIp+EaNgLjtZXsAKtgIAVvfcCABYuPUe4KY7wzNPvBrqXTvHLgKW1grLnRsemAHU576MUeZZgFnc8NXgMhpH0dDQ0NAwiBkchaKGUpHhYHikNPG75iym7YzaFHbavfGYi8C9Ll3Stbzbptq27Pk9BpEzNjWQ8hHl7luT6VepakWdDeUFqN6ziSj5hPRabUhFNpvJ09MKi/IV/6KhBsmp9fZby4Vreq1aXzeFUiPugSr1MLbybl36LFY+O8DkthaK6kwGkt2607qcd3JKMqyqj3gefcvUEtP/z7kNA8nLoYvzKVHasxI5r8yJ/oKpfOPRRcr/7JGjAIATzzyBu28NXtr7ti8DAFZGQbl9GotYHYd7R4vRhHbrdvi9B8MzI5cxPrWKbi1El6VXNvELsV0dOkQHPvJwB+DYKGHK8MxAMI+lW2lN0e+nHFT+lbL9afENYr6ggOq55BGqG1DoL/XEZDj5GLMJmrLvTp2jZzmuQ0qSh6R6LpWXnC/SxqpdM22EOoVeZVGvCFQS7S6g7L6n5GCb58WxyKfiuXwVRFtFL5XiNbFcYej54ZNjDTqkCY+WWjimWZGkCtJKu+c454biMUzDHGOuFfPlxlLzQJdsc/q+or16zc/npEHx5RpAXA7oST0pkb0oYRPJCb3Tae3X4rFSjKUV84kWN58qZLEDLxsFlfFG8lZTvQZi/cWLpeX/6Ht1PI7gQJ3j8xcAAKdefgY3vhwsoHbeGEOPL4QNY+/idsCF+twoDOTKtmWYG24JdewJvhju0mks+ksAAEu+GxRNAh06TxtFzPntDXzc1Wm+pWt3JoLVv5Pxp
R/p+0nL5RtpOudrxNBG0ERPDQ0NDQ2DmCF6ymJHX2Xk/hPGWKWU9nwOSCk7iYl+lRq6SeDmb4AKqCmfL6eWqVfmbFsiMqiIo/xAXwu63FRaZGZlz5ZyU58zpS/rGf/1viuvuAxtfDHN76N2DQaFqEqPR570BwagWElGeXdTvKJaDwov74rJufelKLgGzcVU+2LStgFelN3MNahvOuduDTCOP/sYaOvEiTfx+g9CwL8H7rgHALBnIXAWq7v2wZmw9J3sIoewdy+6u94HALBHgpkszr6JfuU0AMBFjkI4LJuY+AZYcIwqxTldXqrlawuNo2hoaGhoGMSMWE9aRnZ1t8Vh2a9W6UxvV7gt1SVQ3Q3z4bId4+IxiMJLyrX6DrJXxZSYUa9v2Euz/hupmL8WrTdVbJfnqM3FuUq5QQQBfFFvrR2DIP0Dx0lyXC97PScvIT7Dll/OUNytzXT0mmUWzc6DrtJeVsKrL57Ka04l6jb82iWceeMwAODcocAh7N1/FwBg+5azOLsQQpAvLgeTmbuzYQAAHlVJREFU2ZXd+2AuBN3EtlsDZ3Hx7ZewdvoIAGA0CV7bojrueT6T2raHYdNdDyfls6GrjkBNz1Yr9x6hcRQNDQ0NDYOYoaMoqfHNNruaB5SkXjvmlWFAJEmMUUJRU5jZKcuqmjx/HmajMh4z5ZC16/k55qDmaMMUzCm5n7ey0rkuNeMIB5/QefFShTqlurQJYBy4KsXCMmi5upmhP2bluSj1BtPqXk87LPJkRon12BzzL9ENkEMpOtE/0HthSyfPzmx0ozdGsotWLJuqz6z8ngvJ91h5huWJFNvtxZmOnmllLWLNqTKgsvGbHjnK8eGx8u4JAMBbh58P5W4OVlB2604sbQ/1bel2AABWF5bRL4TfbnkPAIT/u2ApZe2lWL+01WY2yt5YuBg3zlasnpCUvr4kG+uI9ZSaZQ2WNLVJtz7x1TSPVxZHpRZ1Wv8lbGryOF0HkgK6pfJY/fy5m/3e40pt4IMSH1OWo+/JIX1JVCYjOHztEZVxr72K9XZ5VvKXzVRm166X4hwRyZSK7spGlAw3iZ7EBJbu5QXVGNlrU4qpOJWLoWZuHjWRSWEvP7WKOBcqG2bxHQrBUvPFGMVyC3H3W+snGJ8P3tqnjx4CAIwOhThQ23YdgPExQdooKKmNHwHj8HttHBaQSW9gYzkyBujjhtSFiV10b1MJtQpyb+2rFfupiZ4aGhoaGgYx7HAXHUm0iZ6EKwZfy6HLp9RmRvFYy2XqcW9S6krfwyy3E++6XKEFo8VR6b6f1K8coGohzXN4pV3NRWF5e4dQKEZZuiPtqMWoutKYl0oZLkedqfnLV0RaqBCeIrFQN9aU4LPba1R7Z3EUUu306xsZo5qSfL31Krlc+OshqYt53BTHnIu7DCS8eIVczMeoNqeNKSPbGmMKqrq2VnjlFFtTAOcOnEZ9s1lg2WCVGp+xGst3sLB9MJo9/0aILIvusVDGL2HrA2Ft27E/mM6OF7djbMK5bstiqHbbVvjl4IltJ13Sbg/D66M2tDBKRDYLl8MN1GLLVcVbm8xpNI6ioaGhoWEQM3QUOjfhbEp7M5FyEXNQdrZClRn+o6hTD5F3ZuUBCf9RUdpO/b9+Km3nXOXKUkMUXQ2aurlsrNNDyKNUZktdSpfJvpI+uR4rmd6cjXA4AzqQGo8ZK5r2ADU2lT7kFZuKUt+byg011PiwCrWqrToysi/lctNqjUqaM8TRbEiJPQBxLkW9q6zkigc2sTasi/TKZHYS14g+cieLvsdS1FeMz5wDAFz84UuhjFvEwijEeNq2tB0A4HbfgBVEE1gbOJHRVouVhdAA0k14friB95Q0hCQvIrnQMeamzrtZ4znIjV5dxelcymzvvSh+iwhlm4U5F8JMyZZN6+QarF6H4i8Hie2kJp+uO/89z4e9XoGBtLZyrWJvXvXQraCyjF0+5uxULdQ7/28Me7CqnVk+ErVITI31NPN7MtXfWSOz1Wk+sVByzpRiwMHNaUOhqPkXUNtNS7M/Rdel89mYEIcoNE3EU56tjaSOeSyg1ivWqCr06Vu1KOaCV/dwVkyeJwaWxGhxwkycbMdaNE3f+YieNQ4B/taOHMJKTFy064abAIRseecvhthNbnwGANCvvQvbr8THxmd1QRTVO8DFDcLq90N+HPOInuywQMfk65m+lszhK79pNNFTQ0NDQ8MgBjmK0YJSIMVfbijpxiZC75RV89jikZqqrHADKgQz15F1JXiOYuozNxsFS1rlklT5q2QKN6+UcZqCE9DstfRL3VjnCKdx1TP7LeJFYVByjWdOhaWwFequjEs0zLnM4+FcF4KV5TTnPJzOdwAUZdUY2DgyzHRYK2amcxKk88y/wSC9+n7d/kyWpBk38p+wTo0f2fpSqm0ApicldnzHdoQ+Pm9igihpHMOC96vv4sRrLwAARluD6GnLDW9j4oK05PyRkLjo4rHX4C4GsRX6yG1EjsLbDp4TnlGaAvHbYnuaAYPZmcYUg/dcXdFT4ygaGhoaGgYxyFEsLBKFpGSEfUqZ0HUA4NSiFZm1VvTUJOmify4pqaQcH4lyVTQkVyKUUk4de6O0S6L5zeoaRm2nL5yAigJVjd2UMtK4tKn677TGzWQDhq/Pqn/Oe4WKrFSYvNuK1iYn5Geoi0q+V06a8mUnRSSNyBApHGuYpv7IuKj1GiAMPDK+r6w+g/JcrQ6lUOWhJV2Fh5iOs/5igPrlinS9whmSbsMqL3qlVmA4zRnEi6w/VImI8velTWLZjJYizPYeIxs9oqNHt+s9p1E1C9Hc1VPeiAlWTrwFADj29HcBAEvbXkFvQ9ynC+eCR7c9/w4WJhItFlDMj1drUGKvG9o96jou6HKO0E//mpPxr06f9LtJVomqsMfXfsYT83OogxvF0nJM7tF7zttrXWTf4k7RT0T7rwIdK4UxDZJ4MooRU2TZPGBzlspL0C3NbnFIDq8sspBaRpEts4feqOKzrWLv+cWpdhfyoHxUZDNTj1ft0mEK5GOWpE3ZjcmzZPutSdak3VLVtDAT0z74wcRCbHc/p+yppqR0bIoCAOiUrb2v1qcn67THzBIDsrywqNUk7yftnzGmeH9JrUWqQsM1z7MF1MRGYd2Pvx0pPiv9Uw9gvyPqppG2aVGVyV5+Ik6j8NdO1Z8bGVjAdKl4jo69m2C0EJaL8TisAQsLC2pvp8RCplCIT6LYZjQaoetoHOLa4hz/NjH0t7cOhsMrUMIg6kdYj0IdoV6zMILvYztpvK3lEBsuelp3sY2dteyFPTkRMuL5d49j4sK5UZQbGe94znLubhctokwPXuq72G7vEbsHF/tsjOUxpT70eu2id6WWB+5qmlEh3jIktqydlB82//7iNWsM+5JNQxM9NTQ0NDQMYpCjWN4aLvdjjz5SCRM6ToR1dJNMQOK9UPg19qYQC8wL2W/rlDSx0HTSq11WcRxTzCST1I7XNKjdlSvrVHhXqdkNKM1ryraa9/qVQM1MVk6V6Ubze+iGoXzw1wQ8lCw4osagDvhAGBjx5JagUEJZ
MrNNFHqH1bU1AEAXxSrBaj6aoMaUojt37cT9990PAFheXk6euba2hhdfehEAcOyttwFESUARJFG1M/tGJ2trsIYC741ie0ZwkRvYu28/AGD/gX248WDIfb20FERKLs7Js2fP4oknngAArI4Dh2DdBAu0fnE0hDKlc6dZG/5cRPxGXBxxX33vxAAojjOL56y4lDMHrCXjNW5xA5gWv2uez7JxFA0NDQ0NgxjmKLaEnbtfcJhE2WYXuQezJrLDSdwDJ2OtNxAFGUDyWhTnCEMeoTUMhYDWppG51WOol2LL8Emug0XsSrZXl9XPoczYZOQy6Muh/MtxK537LqcntXoBwHjh6upmy/qh0+fAkGlrTXlclhfqUF9j7kHptS6H4rqSKNUnZTrVwAmRHo7MR+VGZhRs2XcheB0oSiq9CkfRUhcsrA3LxcJCdDqbeNx6ewjd/dnPfAYAsHfvPhw+fBgAcPz4OwCAyWTC1774hZ8DABw6dAgA8MQTT+DsuYtpf02pJ6N+Li4sgpx+x3EN2rplG37lb/8KAOCRhz8Szm3dAhvTnK6srSbteOmVV/D8C8E8dnLuHNe/JSYxurhyIZwDVHKkdLA8SpPqpaUljNcCh7KwHMdq0eDSanDa4/SuZKrsnVjtUyXqO+B10k5P+DR7nazo3jLOou977N+3b7CexlE0NDQ0NAxikKNYXIo7Tmdgo6WBJQ0/cwWerZh8tFDoexWrnXdiAxUhfxOaXth6qWfGM0pvwY92iBZYXICvBfnsvG0rZfFXC4VTG4Yp7iFcjvxzSJasCsU2+oLzCZY/2fwwiYEGVUqXhlHReaXWTogGJikXE8IupWOZOLVN61v98ZsaC6lWv0bxLO/FXLNiwcJhVBTT71PmAd6VuR44hEbvision/n0p/Cxj30MAPDYY48DAP7j738dFy6cBwD0E4k+DQC267Bz104AwFe+/GUAwI7t2/Ef/+D/rfTUp0eeT4brpTHYtWcPPvnJTwIAnnrySQDAn3zjT3Dq9CkAwDvvHAcAtmpaW1vDyspq0s+HH34IDzzwAQDA7/7rf8WtEdPh2ELiCry2WgvXxm6Mbdu2hv59KfTviaeexCuHggMf6UNoznXWKqsxeQ5ZJ/mKJeV651jF4r7QGXZ2hNtvu2OwnsGNYtvWoIzq+wnWJpTUIy1jO8cfY8/WnQ7e5TIfGeSclw4+FuXnMJhfN1+gK6alHkb5dsjuUYpwqDMoTQavIQxtEIRZ06gc5nKDTS6tF/l+UfxN25gErptahyo0pJA2qShGH43aiHgcE/PY6SKtaxJeZjmyX8XGZUw1pLi4G8km4sjcvEvFWN57bFkKgfQ+9/nPAQBuu+02/N7v/R4A4Nhbx0IVzmFxMYimbrvzNgBh/QCAt44ew7snTwMA/uDrfwAA+IW/9gu4/fZQ7siRI/FZ4O+5ZgDTRZES+Sp87NGP4uSpsCn8i9/7lwCAN984wvfSBkF1LCwssEj6hgM3AAB++X/4ZXz729+O4xLHoGIAsy8qy733OH48bECLrLju8dFHPwoA+OjHwvEb3/wGT8VuJGa01BfLoq1oIuw9unjDnn17Q/snE9701g/ZYMTvI1zhNA8w+Mxn/spgLU301NDQ0NAwiGGOYlugICaTNYyiIsiukblc3CZXJkwxdCTC6QDHURSFXGfigCiZJCRvabpVE6eUUVX5ijonOyXXyxEn60IrIHiNsucl1eqMsL3UbgekvulIWPbLFUEMUrBzErfstTovl2ZKmtTnHNcccD4dDzYQ6NW5pNr4PpQXmTZl1SgchTIkfZrCjQQpU9ovUwtfX7ehrVWtGzDYvrJ4Odd9NseS87niGnpMhLOQV0DlleMbk4RRlKScBw1TltIA1/NXASCYv378Jz8BADgQqfDf/d3fxbunz8RnhDq2bduGn//SlwAAt0dFt+vDJPjt3/5tXLgQFMVnToc0pUeOHMVdd9wFAHgjchSxUbFNqWc0AI6jdMvNNwMAvviFn8PX/u3XAABHjwXOBl0HkAOdjfGZaBydjPPnf+ZnAAArayv402/+KQBg+7Ztod6f/Vl876nvAQBOHj8JAPi1X/01AMBjjz+GP/iDwBWRg+H+vfvw81/8eQDAn/zpnwAAjr19jJk/MiGOSym6ruO1sB9Trm9g567dAIC/++t/FwDwjW98A//lj/9LKNen4rzhuGLD57nd+w7gx3/sx6r3EBpH0dDQ0NAwiEGOYuvWsLOurVmYcXC24fR/FJnSWfST6I6/HM71F8H2eJyp1HhlTpbubJejUJ0eRVZ2P22Sm4i5mbIkyo6Oqr5E7r2upl0T8FBibK0dMBklqp2yNsHRrojhZJWBgFZS5+/FeEyj2WfG0dLtyBSBwqSoGEQVDqGm/6HIpRWm9T2GcAOE+hxVsdeIo6YQHla/A/XOCpYmlL/1tlvxgQ8EZe/X/t2/BQCcOnWanceWFkM8pa98+cu4OVL63/9eoMY/8MEPxmcLKU+K4BPvvIMde3Zyr/hHZtBAmIwn2L4lRHz9ha/+d+HcZMIK6w8+9GC4zXu88srLAIBz0QSWHAX7SY9bbrsVAPCTP/ETAIB//i//BU6eCDGe/ubf+BsAgC984Yt44bnnAQCf+fSnAQD3vi+kTv3zP/tzbF0OEpdLl4L566OPfJTn/3ceCzGk7KhDD9LhRq4hxqVyTsJqLI5iylUHfOHzXwAAHDx4EABw4dIFdl48f/58Mh7W2sF0yfR+nBOTanGYDP9//OMfR+/6egURgxvF9u2BBVpd7WBXL8Unhw2DFlnngUlPm0bo9GRk2VPSs4VCyj4GaHFNdiWxiYf6nSrxfCLyyD92WXxSG/rUntmky0NSh4co+MQYXX+oaXndjuRMvkBvSFsufco3WJdY+ZS7ZCGqU2uDqfZlqBnq2bnSkcQrTp2sKKVlD5g+HushIqb5cwClx7UxlRGqeWbrOXeNbBbF3jnQrhAAkDSY8WSvb6DNVYUFpI02Wjl+6hOfxpNPhoX/6JE3wzVjYEehws9//vMAgI8+8ii+/a2gFP7pz/4UAODff/0/AABWVlYS5TgQfA/Wosc3x0sTbw61h4V2LSws4uGHPgwA+NmfDT4ZZ85cwK//2q+H+raFBffc+bP4jd/4XwAAl1bi2hW/363bt+ILPxPa+9obhwEATzz+GFtv/vCF4D1+7z3vx9/6pb8FAHj4oYcBAK+/8ToAYOeOHejign/zzSH50Re/+EV87d8FEdip0yfjmMqcIf8PrVTevm0HAODLPxfEdQ/d/yAe/UjwBXn+pbBJHThwAATyYaExq4XH16AgjH3f82Y+jtZXVO8nP/1p/Nk3/xwA8DOf/FK1niZ6amhoaGgYxAzz2LDbWasVnKmJVd8DkxiJkeybF5Y872TE0fQSbFEwN6FYI5cqMiU+QyIDTUlrJXk8WrmDmsMemEr0JLb4QnkVrPEmUJpzK7PneuTwVS0qKpXI83EW3HXvM0V1+ZzsLJc0Ktf5tCBLl2OmWsZwKq/5SnlfKXfNwftBDrwoDiXapfhOBhBxVJz/FpAsweHagf2B6lw
cLeCZ7z8DQBSqXdfh4E1BPPKVL30FAPC1f/u14B8A4BtROfzY48HHQsdCWozU7cGbbsKzz38/PlQOOSdBEWh3bt/NnASZ5P7n//zHOPRa8FV441ig+MfjNbzzdvAM3717DwDggftDDKotW7bg0UcfBQD837/92+Ex3uN977sbAHA++oGcOHECX4qK+WeffRYA8I//r38MAHjr6FtYWQkip698JfR9dXWV+7e8FERFl9YusRKbPMlpfBa6jju6J7bx0UcexROPPxae9U/Cs958800WL1H8qve///0AgKeeegqnTk03nWVXMWvZM51w6623hLrufT9+53f+2dQ6gMZRNDQ0NDTMwAzz2KBk6kamCJrURxlnP3HMUbiY5GOy5tBH2WU/EcqVdC42UyJrcm8wYqIR+Z6YNpZ7nWWOQmnFEmV2dgMzER65f55Xprvz4b2mQjWnNV1XMp+Doy4zfQxq8aJMhTxM9AUe2Tn9jPWN4Uac5Qqdhp/K2Fx3SJzVcg4EKDta4UpIMX3s2DGcPxuUwlbZkH/up4Lz3UrUYT7++OM4fTo41W3bTsYwEnWWTOnvvPNeLvNqjPvEqkAIl0tLxCjK5h968CHcckughH/rf/0tAMALL7yInoxsDCVQAg5G6vsnfuwnkv598YtfxPeiB/dLL70EADhwwwH8UtRHfP3rwVP8m9/8b7g/ciG/8//8DgDg0KuHeAxui46C5BXunMPf/jt/BwDw4v8cYkmtnFjhvixEzqKP6+S472HjO/jzb30LAPC+O+/CP/ln/xQAcOT1YC5srMFHPhT0Fr/6a78KANi+I0h7fvM3/z6+9e1wLyH15OaT8s3F9/dXog7p8ccfxxuvv4EhDIfwWA7WBb4zGMew4i4mLXGjcPQLE7gFCh4YGrDWAS68V/Tj0OC+N0okFI9OFi0WepDztgFodkvUARFx5G68urxWcIuFk5wjb0gWj5EXqNXfU8xoZSTpEIcThimDB+ZWJQq6bUMiM72Dzeupnp/q+LYyX5lXfgNJCGpev+NYkpdoZQNPK5zeEElW5ArFtUkWgtT0IP1FYhKL9D1nqJzLPfcNbMWYQl1XFXdlg+L/tv7aKn4O8URl8fWZ8QTShG4cakPEoV795mrzNb5sVjJYVeFtDLnD79mJLwsdlxeDCOXC+YucsIgsabz3ePTDQYTz9BNPAwBOnzyNBz7wAABgz74gTvnmN78Z2+CxZWuwFHrkx4Pn8nM/fA4X1sImYzv5LlnpHb+zbUshNMZP/9TnOFwIBR+0SmTGfgZdh09Fvw8KANhPghL3/ffey34X5C391//6f49T774LAHjplbB5/I+//Cv44Ys/BADezLoRrQsj/OSP/WQ4RwmX/ITH4cLZ4C9inIGnYKk0/1m8Z1nB/IXP/3Ro6/PP4cibbyTjfPedd+PX/qfgv/Hqy0HE9nIMC3L8+Al0MVgjfWDjyQSWM+yRL8mIN6jd0UjpA/cFK7Y//P/+EPfeFTbuaWiip4aGhoaGQQznzF4IYXc9HBYXw27soj/FJJpYjcc9FkZRiR2Pi4sTNo8dj2KY4rGDJzfdioTB5z+MqdgAZjdtEKzo4fSMmgMRxTXbKXMSF8PiLc4BrJTgRdhuP4USvlZRcG2p8vtyq6syA1XRU3bnDE6r4TKhQmZPA1Ho27dtY86DlKLLy8ssZtq6NVD8H//Jj2PHriAW+dZfBJEIfT+jhRE+/omPA5D3+MwzzzDH5DndqApvHk1K778/UL83HbwZ//p3/w0AYG01BvYzhlMpT2KfHn74YRaxUEjzr371qwCCp/O7pwP38MgjjwAA3nfP+/AP/89/CAC45bYgbnv00Ufxv/+DfwBAlO+f/Kt/FQDw3W9/Bx/6yIcAAP/hPwTz3y996Uv4zneCafANNwbv9d73LNaRdLHR2xxg0RYd/7c/+C1Wfn/1y78Q2vHhR3DmTPBk/4vvfCeMaRyf1ZUVPPRQ8FN58ntPAYjhzieUfjZ6pfeOuewPfTi0e0v0A/nhiz/Ez30xGAhMQ+MoGhoaGhoGMchRLEX5pIfH8lLUTcTwseOFCR9XRzFZRxe4jYWFBUzGYfcaRfPHcQeYPg3LS1S4sdM0iJlwe5MVxbli13vPznXsFAOj9PhK5ptxO/yf8oDcbBRRb6uYzyw2PTnlRGKnqC4VjxAhe8kTTDF5Lc74omw1TauuYz3cxY+KhnodmDkP80jJRnSEhHfeCSlLP/zhD2NnVKCePRuo25WVFfzTfxoUr5/4eOAU1tbW8Ed/9EcAgPMXg5nprl27AAAf/8THcd999wEA/vm/ClFeL1y4wNyAjwYyC3bEOoeFUaDkP/upzwIA3njjDbx++DUAwNJiTHHa90B0fiNl9t13382K6oceeggAWAn+9a9/nbnUz0VHwSef+h5ej5T/L//KLwMATp05jTePBoXypz4dFNak3/zEJz/BaV237wxK++WtSzgZHe1+7udCzKdv/Ol/5bGkYWYOq+uYwyIO540jR/DTnwv6it17gi7hoz/2MXz/+0H3Qe/0xZeC7uQrX/0KfvhiaMdyTLw0mUyqjqdbtoT1nExriRv80Ic+hBOnTmAIjaNoaGhoaBjEIEfRdWE3H416LMTde2FhOZ5bjcc1dPG37Va5vLFkIRGdTAwwzkxVmYJMQrp6ucay8pyz2FxUnfGYyvLKFDe2zYvpqVHRT7m8SjgiuH725CQyR34NpUVRfr16I9QQQRGzykorN2XO9UXJuYYNIw/dEk4mBxx+9TAA4MK582wG+od/+Ifxfs/moodeDVY4tuuwEPNR3HxLkPV/+lMhTtLu3bvwe/865K8grsQ5J3lqSFfhZW248UB0MLs3UMHf+rNv4atfCbqGR2Koi/3793MU4r/39/8eAGD79u0cm+r++4L8n+IZXVy5hFvvCJFt90ddwu997d9gEtmYnbuDW8DWbVvx6c+Etu+/ITge/qf/FExnv/LzX8Ef/9EfAwA+99PBRHi00OEzn/0sAODUu8EJ7pVXX+XR7MkCaUR2dR47dgTL0sXlsNZ+6ctfxvbt4dxj3wnWXb/4N/8mXn41xK1aWApL9i/90i8BAJ75wTP48z//MwASmsN7j0WOVBvNhp3DzsjZffgjIQTKpUvB2uyhhx/CP/o//hEA4Dd/A1UMbhQimrEYdeHl84YRxVKjtVWMInvYRcUJMFasvpj2WTJ3JVNSXnitijOkFoVc4uQNajlgLx+ZrbFqrzaJJXaTw0NbiPlilvQ2FcBs7sY2j+ipDAS4Oc+siYEGRRvJeMRTehG6zL2/FqK7oY65RU/0L8B2/YRx9IH4/d//fXz5KyFr2y/+4i8CAL7/9NMcGpy+kZtvuQkPfCAoV///9u4npWEoiOP4L0VacS2KFTeJSBHB6g2Kt7B3qmcI6AlaN/4DN+oN6r8odWHp0lZrceViJmlXbyeifD+bLF8I4U0yb968xWU75KfbtZ5F7U5bky/bzTzbervoaVfUwUdFkcjCvC2Sx96KfG1lVcM328/xmGWSpJOzU318evvykQWg7PlJzf2mj9uRJG162W68Hm
vJA1B+WNJLr1e8k7d3ltbZre9oo2apskNPleWl96P3oR588t4aWGprrlwuFpmPPaCMJ+PpOeUzpwtKlmq79/Lb+rYtMFerVaVpas/eJ/6Ly3M19hqSpNqr38+R3c/1zVXR5C+/StOCgzzNVYpKqlRsnk6SxMb3Z9w6aKk/6Cvk73zmAgB+RfRTC68AgP+BPwoAQBCBAgAQRKAAAAQRKAAAQQQKAEAQgQIAEPQNpH6gktjTKqAAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "im_ind=15\n", + "\n", + "input_im = np.moveaxis(X[im_ind].numpy(),0,-1)\n", + "input_im = input_im/2 +0.5\n", + " \n", + "fig, ax = plt.subplots(nrows=1, ncols=1)\n", + "fig.set_size_inches(7,7)\n", + "im_h=ax.imshow(input_im)\n", + "im_h.set_clim([0, 1])\n", + "ax.set_axis_off()\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 103, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([480, 242, 116, 255, 393, 89, 154, 246, 286, 224, 254, 1, 500,\n", + " 103, 153, 55, 426, 32, 259, 245, 503, 216, 87, 348, 472, 108,\n", + " 168, 140, 206, 198, 157, 249, 196, 76, 57, 295, 336, 177, 112,\n", + " 120, 49, 274, 438, 199, 19, 17, 221, 63, 62, 335, 363, 443,\n", + " 31, 142, 247, 99, 235, 285, 146, 294, 386, 34, 373, 150, 433,\n", + " 159, 172, 464, 179, 269, 355, 371, 132, 209, 16, 314, 323, 223,\n", + " 227, 402, 11, 67, 405, 147, 205, 45, 43, 106, 207, 504, 330,\n", + " 97, 166, 144, 417, 276, 4, 74, 152, 316, 317, 82, 27, 10,\n", + " 290, 98, 488, 263, 52, 452, 378, 461, 441, 95, 479, 143, 457,\n", + " 75, 35, 467, 109, 100, 409, 485, 511, 501, 353, 502, 456, 498,\n", + " 92, 390, 36, 365, 398, 136, 219, 345, 305, 362, 413, 463, 38,\n", + " 312, 420, 325, 324, 251, 510, 300, 60, 240, 59, 396, 354, 389,\n", + " 465, 180, 8, 2, 64, 165, 442, 131, 70, 307, 232, 352, 41,\n", + " 191, 408, 430, 265, 18, 505, 289, 424, 5, 282, 79, 123, 236,\n", + " 288, 496, 111, 241, 277, 65, 77, 182, 73, 321, 148, 359, 96,\n", + " 379, 380, 366, 308, 256, 33, 268, 135, 372, 54, 375, 302, 266,\n", + " 237, 356, 322, 231, 279, 414, 174, 499, 66, 492, 406, 358, 3,\n", + " 459, 445, 69, 434, 425, 260, 407, 313, 126, 184, 226, 23, 12,\n", + " 347, 25, 21, 47, 273, 262, 257, 428, 476, 421, 309, 275, 187,\n", + " 399, 304, 419, 243, 344, 411, 360, 319, 509, 439, 491, 454, 118,\n", + " 258, 395, 494, 44, 214, 340, 466, 357, 20, 332, 418, 422, 392,\n", + " 487, 155, 163, 261, 13, 162, 477, 192, 105, 272, 248, 169, 183,\n", + " 217, 278, 193, 381, 506, 72, 296, 40, 388, 233, 15, 370, 228,\n", + " 24, 156, 203, 215, 7, 138, 469, 455, 204, 114, 234, 264, 280,\n", + " 151, 460, 253, 303, 470, 48, 185, 450, 68, 130, 122, 211, 160,\n", + " 451, 61, 382, 374, 478, 250, 197, 327, 471, 175, 493, 440, 385,\n", + " 225, 341, 137, 301, 244, 483, 6, 368, 200, 238, 400, 220, 149,\n", + " 267, 252, 141, 320, 202, 39, 115, 213, 51, 334, 171, 437, 46,\n", + " 447, 104, 361, 462, 133, 306, 333, 489, 173, 403, 431, 346, 139,\n", + " 102, 178, 427, 210, 475, 93, 338, 339, 291, 195, 391, 229, 188,\n", + " 497, 349, 482, 458, 113, 329, 145, 53, 26, 423, 384, 81, 14,\n", + " 415, 444, 435, 369, 343, 474, 383, 299, 287, 326, 194, 29, 342,\n", + " 85, 449, 377, 292, 212, 58, 110, 315, 364, 189, 80, 448, 230,\n", + " 507, 222, 495, 90, 293, 486, 101, 432, 281, 30, 397, 484, 78,\n", + " 318, 297, 158, 468, 201, 94, 42, 164, 129, 88, 401, 119, 376,\n", + " 404, 328, 416, 91, 84, 429, 387, 239, 0, 56, 167, 127, 490,\n", + " 22, 271, 83, 298, 367, 170, 410, 310, 436, 284, 283, 107, 128,\n", + " 50, 270, 351, 218, 9, 412, 481, 350, 337, 176, 125, 473, 71,\n", + " 121, 190, 181, 37, 331, 86, 508, 134, 124, 117, 394, 446, 311,\n", + " 453, 186, 161, 208, 28])" + ] + }, + "execution_count": 103, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.argsort(np.std(activations[im_ind].numpy().reshape((512, -1)), axis=1))" + ] + }, + { + "cell_type": "code", + 
"execution_count": 105, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1MAAAA4CAYAAAAYY6KYAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAHDUlEQVR4nO3dQU8TXRQG4NMppUUBjdGNce3fNP5NNy40MZqYKCAUKO23aM5wGaalDNKWz+fZTJm2U1fe3Puee25vNpsFAAAA91Nt+h8AAADwFJlMAQAAdGAyBQAA0IHJFAAAQAcmUwAAAB3sLHvz48ePT6LV34cPH3qb/jcA8G8px8i2zrjlvXzd9rler1dfy9flddFzV2GMBHg8kikAAIAOliZTAEB30+k0Iq7TpPy7VFVVfW1LoiLaE6ryuQBsxtZPpnKgWDSQAMA2KUv68vXV1VVE3J5cRdycTO3szIflfr8fEXeX++V9kyqAzVDmBwAA0MGjJlOrrJStmjg1nyWpAmBb9Hq9W+PUbDarE6m8TiaTiLhZ7pfJ1GAwuJFS5XPbfqv8DQA2RzIFAADQwdbvmVqkrBUHgG1zdXVVJ1CXl5c3rpPJpE6Vcn9UplcR18lUys9ELG9mIakCWC/JFAAAQAePmkyt0mXorm59y55hBQ6AbVOOTZk2ZSI1Ho8jIuLi4qLeP5Ud/IbDYT3mZRI1GAwi4uaBvuW4WaZZi/4NADyeJ1Pmp/0rANvmrgYRzTK/nEydnp7W97Kkb29vr/5eTrB2d3cj4mZzivLZzZI/YyTAeinzAwAA6KBTMlWufK3SBCI/U66iNcv7lp38DgDbrm0MayZTp6enERFxfHxcp1Tp+fPn9ess78tkajgc3kqmIm43o9A2HWC9JFMAAAAd3CuZWtYEorkiV26WTdPpNM7PzyPiepUuN9kOh8MbG20X/Z69UwBsg3KMW/Q6x6psFHFxcREREScnJ3FycnLjM2VSNRwOI+I6rVrUNj2/m/fsnQJYr7/egKIs2xuNRhFxPWH68+dP/P79OyKivuZ7h4eHsb+/P/9H7TyZvhgA/MNyzCsnUOVkpyxzj7heSDw7O4vj4+Nb9/K7z549i4iIg4ODiJhPqtrK49vK+wBYH2V+AAAAHfy1CKi5YjYajeL169cREfHy5cuIiDg6OqrLGr5+/RoR16ULVVXVZQ25Mrdspa3X6yljAGDrlGdFNZOrTJLOz8/rZhR5HY/HdWVGJlIvXryIiHn1Ro6X+Zler7dwvJRUAayHZAoAAKCDBydTzbasaXd3N96+fRsREe/fv4+IeT14NqD49u1bRET8+vUrIuY143kafLaCvWtlzcobANui2QSi3+/X+4LzWo5bzYN8Ly8v6/Ev91OVqVXuo8pmTW17s/Je2bACgMcjmQIAAOjgXslUW1vyfJ0rbNn29fz8vN4D9e7du/mP7ezEz58/IyLiy5cvEXG9ijYajW4dSDibzaRPADwpOZZVVVXvb8o0Kf/e2dmpx7dMka6uruLs7CwirhOp/Hs8HtfVG/l54yPA5j24zC/LGfI/+RwAer1efP/+PSIifvz4ERHzRhTZLv3NmzcREXXZ33Q6rcsgAOCpaU5uyslUXnMMHI1G9YJjTrQuLy/rMbVcmMy/814+o6qqhY2YNGkCWA9lfgAAAB10SqbKcr9mu9e8Hh0dxefPnyPi+vDBV69e1WV+Kd+bTCatLV6bLdcBYBvleNVW5pdpUo55+/v79UH12YBiPB7fqtDIEvrJZFJXgOS1qqqFY6NUCmA9JFMAAAAdPGjPVHlgYLZzzRWz6XRatz3/9OlTRMxX4vL93FRbHm4IAE9Fmf4sqqhojpF7e3sRMR8P80DeTJ8Gg8GtRhVlq/NmMtXv92+1Y28eUwLA45JMAQAAdLA0mVqlNXm+n6tpWQ9eHhiYhw9mXXj5frmCt6zG294pALbVbDa7cVhvyuQok6ZMpg4ODuoKjRwPh8NhPcblWJrf6/V69fPLVurl7wOwfg9ujd4s02trTtE8i+ohvwMA26xt8S8nRdkOfX9/v251nkajUf3dbFiRny/PYczPKOkD2DxlfgAAAB3cmUzdt7yuXD1re9ay5zSTrPv8LgCsW1t5XSZG5XiYr7Mkfm9vLw4PD298b3d3t35esyxwMBi0HkWS95rj52w2U/oHsAaSKQAAgA5W3jPVJVXqShoFwFNX7mnKca3cO1U2kIiYJ1N5L/ch556pMplKZfokhQLYjKWTqbs67HWhKx8A/zdtY2XbOFd298smE2VX3JxMNSdfg8Ggft32u20TLQAenzI/AACADu4s87tv+d7fLPcDgKdilXGvmTiV9/r9/q1252WSla/bGj0ZcwE2QzIFAADQwcoNKO67x6ltv5V9UgD8362SElVVVTeZKO81k6kcN6uqqhOpciy1DxlgsyRTAAAAHaycTHVhpQyAf9VdR4q07X3KtKotccr3jK0A2+NRJ1MA8C9bVPJXTojK8r3muVHl5/J18wrA5ijzAwAA6KCnnSoAAMD9SaYAAAA6MJkCAADowGQKAACgA5MpAACADkymAAAAOjCZAgAA6OA/zw8J6O71ynIAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1MAAAGKCAYAAADpFhtSAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nO3dWYye51k38KckMx7bM/Z4iWM7dhNnc5M0CUkXWpJUVVskkIoorQoSBQkhOEDiEIlTOOCIA4RYxAEViyIhoCJI7QEFIcJSmgZCmmZrNjtpndix43gfz+KE7+gT+j7e62/m8juTsf37HT6X7ue5n/299Er/533/9V//NQAAALA8P/BeTwAAAOBypJkCAABo0EwBAAA0aKYAAAAaNFMAAAAN16biQw89VEb9nT17thy3uLg4cvm7775bjvmBH6j7ulS79tp6F6ampkYuX7duXTlm/fr1Za3rnXfeGbn8/Pnz5ZilpaWyNj8/X9YWFhbK2oULF5a1fBiG4ZprrilrExMTZS0d42pcGlOdy2EYhsnJybKWdI5HOi9pXErNrGrpuk/7nI5jul/Sua5U1/alSM+J973vfctaPgz5OHZV+52uga60b50xGzZsKGuPPPLI8jd2Ffv5n//58sZ+4IEHynHVczpdP91rPN3X1XMkPW/TOzK9E5Lqfkrvs7m5ubKWxnVq6fmdnsXd53R1HNPzu/vc77yb0jM6XcOpltZZzSNd253je7F1rsS75HLWuXbSb4bOu/9itUq6Bk6fPl3WfuZnfmbkxlwZAAAADZopAACABs0UAABAg2YKAACgQTMFAADQoJkCAABoiNHo9913X1n793//97JWRUumGMUUiZiiKlMkYhUHmuJKUyRsilJM+1ZFQab9SvHn3YjIKpY0zT1FqqbjkY5jFS+axqQ45zTHzvFI12IV+z8MOTa9Ewea9qsbx9uNW6+OYzpWqZaMO269e992Ylq78fhd1TzStrZt2zb2eVytduzYUdY+85nPLHt9Kba7+9xP93z1zJ2ZmSnHbNq0qTWPjnQdnzp1qqylT7ekT5KcO3du5PL0TEoR3Bs3bixr6Z1Wxc+vxCdCxi2ds+77s3oWp2s7HV8R54ybKwoAAKBBMwUAANCgmQIAAGjQTAEAADRopgAAABo0UwAAAA0xGn1ubq6sTU9Pl7WTJ0+OXN6NPO6Oq+LFUyxmqnXjZ6v5d6Oju6rI0m7kbjfquTpW3bjSNI9OVG9aX4qzT+czXcOd/e5GeqdtdcZ1PglwsXl0rqtxx6mvhO51msZ17unVfu5crbZs2VLWqhjrFA/dfU6nWvXZgBT3vZrS3GdnZ1u1FD9fRaqnTx6kCO4UjX6lxnOnc5auq7VyzUHHlXk3AwAArDDNFAAAQINmCgAAoEEzBQAA0KCZAgAAaIhpfinB5sSJE2Vtampq9MaurTd3/vz5spbSrFIiTjX/KuXvYutL86/SmYahTrfppHStJSnZLqmOR1pfd1uda2cljn0nRa+TRHgx477mugmX3XV2kui6qYJpW9X1uNrJmGthW1ez9E7oJH+ma7V7r3Wu8XEnpK4l6V1dpRumY5hS6K7UxD7g/+VOBwAAaNBMAQAANGimAAAAGjRTAAAADZopAACABs0UAABAQ4xGn5mZKWtVhOgwDMPCwsLI5UtLS+WYKk59GHJc7OLiYlmrIlzTmBRl2o3u7kQRp31OVjOKtXusqnErEaWddCLJ0xy78+jsdyci/GI6scyrHQVerbMbcd6dR+faWc24cvHnqyPFbKf3Z3X9pHdT+nxIusbTu7qKdk+fRdm4cWNZuxyiwDux7913HXB1WPtPPgAAgDVIMwUAANCgmQIAAGjQTAEAADRopgAAABo0UwAAAA0xGj1JkbCd2OCzZ8+WtTQuxbRW8a5VdPswDMP8/HxZS/GoqZaOVUeKdu1EZq9EHHw6Z1VUb/f4rkRM+Lh1Y9M70vFYiWj3zvrGHY3eiTsehnysOsejO490XjqR9axt1fnevHlzOSZdq6dPny5rc3NzZa26ftI7K70/t23btuxtrbbOPdp9DwJXB/9MAQAANGimAAAAGjRTAAAADZopAACABs0UAABAg2YKAACgIUajdyN5q9jXFLeaooFTbPrS0lJZq2zZsqWsLS4ulrUUMXvttfWhrKJYu1GxKdo1HcfOmG78eSdKdiWiczux6en4dqOvxx0Tnsak+zadl07kfrp2uvHnHd3o/O657syje32PO6J9Nc/LlW7cxzLdn7Ozs2Ut3fMnTpwoa9U7rfqsyDDkd9358+fL2q5du1rrHLd0rCqrOT/g8uOfKQAAgAbNFAAAQINmCgAAoEEzBQAA0KCZAgAAaNBMAQAANMS8zxTTumHDhrJWxcWmuNUUX7x+/fqylqJYq3UePny4HJOibm+55ZayllRzTFHxSTpWqVZFLKc451RLcbGpVl1X3djucetEhF9MirfurLMbtZ621YkXX4k4+45uRHu65lKEcrXOdHzTtpJxnxfR6OPTPacd6Zm6adOmspY+91HV0mdA3nzzzbL29ttvl7X0iZM9e/aMXD4zM1OOSdJ9mH6HVESjA4l/pgAAABo0UwAAAA2aKQAAgAbNFAAAQINmCgAAoCFG1KQEm3PnzpW1TlpbN30nJV11ksampqbK2jPPPFPWJiYmytr+/fuXPaabuJWOY3U+u4l96fh2EgI7qWUXq3V00/DSuLWSoNadY3Wu05huwl4a19FNtht3AmM3STHNY62kKV6t0jN8NaXE25S8Oz8/P3L50tJSOebEiRNlLaX5pVTBKulv27Zt5ZjNmzeXtfTeSimd1W8U9xmQ+GcKAACgQTMFAADQoJkCAABo0EwBAAA0aKYAAAAaNFMAAAANMRo9RZmnCPEq5jTF/6Zo16SKdh2GOgI1RRSfP3++rO3atauspdjXgwcPjlz+1ltvlWNuuummsrZ3796ylmJr0xwr3dj0FBnciZ/txp93oq9XIga3E2/djTHvRruPW9rWuOO+u1HraY7p+VfNMcUup1rSOVareZ6vZukZuFasW7eurFXP6bRfaX3dT1hUkepnzpwpx6TfDNPT02Utzb+KkU9j0nMCuDr4ZwoAAKBBMwUAANCgmQIAAGjQTAEAADRopgAAABo0UwAAAA0x1zXFo6Zo9KWlpZHLUzRwNWYYcizppk2bytrJkyeXva0LFy6UtXQ8UjRzNf89e/aUY5Jnn322rKVo9HvvvXfk8tnZ2XJMJ1p8GHJcbCdKNo3pRk5XutHi446j7ka0p/knaf4pXryzvnQvjTtGvrtfnfPZXV/3nK1EjD//e5dDLHaaY3Ufpuuq+1mG9LmManvp0ycLCwtlLY1LkerVuPS82rlzZ1kDrg7+mQIAAGjQTAEAADRopgAAABo0UwAAAA2a
KQAAgAbNFAAAQEOMRk8xv5OTk2Wtik1fXFwsx6RI8m5c+fT09LLXl+JWUwT3uCOz0/HdsmVLWdu1a1dZO3v27Mjlr776ajlm27ZtZW3//v1lLc2/Oo7pvHStZnT0asZbd6OLk07k8bjj4Ich71snoj3pxqZ3jnH3nKU5Xg7R3FeycV+PKyFFklef7Uhj0vWYIsmT6jimz5ikY5/m3zke6f5Mc0zbAq4ca/9NAAAAsAZppgAAABo0UwAAAA2aKQAAgAbNFAAAQINmCgAAoCFGo6fY3SpCdBjqiOsUZZoiycc9bv369eWYc+fOlbVufHulG4ecIpbTOasi6z/wgQ+UY9J5PnbsWFmrYumHYRh27NgxcvmZM2fKMel4rGYkeXd93Tl2tpVq3ZjtalwasxKx0dU6VyI+vHt/jlv6JEPlcojsvhKs5qcXutLnQ2ZmZkYuT+/BdD+lmPDOOzK937vPwPS5j61bt45cvn379ta2gKuDNy4AAECDZgoAAKBBMwUAANCgmQIAAGjQTAEAADTENL8kJfpMTk6OXN5NvZmfny9rKW2uStzauHFja1tp/il1qNJNqEupSONO/kq1KpVvGOrkwGEYhiNHjoxcPjs7W45JxyMlSCXVvqVtpePRTWAct5VIFVzN+XfmmM5Ld+7pPquef+n+66Yspn2rtpeO4biTJa9ml3uSW/WcTul1hw4dKmvdd9ri4uLI5efPny/HJOn3SUoqrM7n5X6egZXlnykAAIAGzRQAAECDZgoAAKBBMwUAANCgmQIAAGjQTAEAADS0o9E7Ub4TExNj31aKL16/fv3I5XNzc+WYmZmZsnb27NmylqJTqzl241ZT/HInGj3No4q5H4Yc2ZxicKvY3aNHj5ZjUmx6uj46xyNdU0n3eFS1dF66104al45V2rdKNyq+Gz9fufba+jGXIpTTOetcV91tdXSfmTAMwzA9PV3WUmz6wYMHy1rnkx6d++xi20qfMXFvAB3+mQIAAGjQTAEAADRopgAAABo0UwAAAA2aKQAAgAbNFAAAQEM7Gr0T5Zsij1N88bp161rrrOKXU+xrik1NtRQhXh2rdAzTfnViqoehjmbunpdU60TFX3fddeWYI0eOlLWNGzeWtVOnTpW1HTt2jFzeifAdhvHHW3d17omLWSv7VuneE934+e5xXK31wUq54YYbytrzzz9f1s6dO1fWqmduesekez59hqX7rACoeKoAAAA0aKYAAAAaNFMAAAANmikAAIAGzRQAAECDZgoAAKAhRqOPOxo4jenGbFdx30mKRq3isodhGI4dO1bWtmzZUtZOnjw5cnmK2U77leafxnWi0VciZruzvpdffrmsffSjHy1rR48eLWtVfG6KWk9zXFpaao2rpDjydJ7TtsYdcZ7WdznEsHfvpWqOnedRWl/Xu+++O9b1wf81MzNT1rZv317WOtdkeqam+PP0DF+/fn1rnWvdm2++WdbS52VmZ2dXYjpwVfHPFAAAQINmCgAAoEEzBQAA0KCZAgAAaNBMAQAANGimAAAAGmI0ejeut4pETjHEKTa1GxNerfPChQvlmBTFunPnzrKW4lareczNzZVjkm6MfDWPdHzTeUm1znmZnJwsx+zdu7espX3+xCc+UdYOHjw4cvlLL71UjtmzZ09ZS/GzyeLi4sjlnXM5DP147s46u/NIz5Zubdzr63wWYCWecZ37rBv5DpcifSLk+eefL2vz8/Mjl587d64ck9651113XVmbnp5urXOtO3z4cFlLvxk2b95c1qrnXHW+hmEYpqamyhpcqfwzBQAA0KCZAgAAaNBMAQAANGimAAAAGjRTAAAADTHNL+kkZKV0sm7yV0qt6qYRVlKS21NPPVXW9u3bN3J5lSY3DMOwffv2sjYzM1PWUhphZ0xK2EvnM6UHVVLK4uuvv17WNmzYUNbSMT579uyy5/Hiiy+WtXQcU7pUdX1UKX8X29Y777xT1rqqdXaT8lYzUS5dp0m6hjvphmkeqZaecelarUjzY6WkRNNvfOMbZa16rqb1peTA9Hy89dZby9rlnER35MiRspaeZTfeeGNZO378+Mjlhw4dKsfcfPPNZW337t1lDS5n/pkCAABo0EwBAAA0aKYAAAAaNFMAAAANmikAAIAGzRQAAEBDzK/uRgpXulHl3Ujhccdzz8/Pl7X77ruvrFXRxidOnCjHpIjlFJu+fv36svbqq6+OXJ4iuFOM8sTERFlLqvOSYmmnp6fL2vPPP1/WUuzrs88+O3L5L/7iL5Zj0vH49re/vextDUN9ru+9995yTIrBTfdEN8q8MyZdw0mKdu/c06t5PFKMeVeaR3U80rHvHENGG/fnNy4HaZ+/+93vlrW///u/L2uHDx++pDn9/773ve+VtZtuuqmsVbHe6fMbqyn9Pjl16lRZ27hxY1mr4s+HYRi+853vjFz+5JNPlmPSufzUpz5V1rZt21bWYK3zzxQAAECDZgoAAKBBMwUAANCgmQIAAGjQTAEAADRopgAAABra0ejjjk1PuvHFVUxxii9OkaovvvhiWduxY0dZ++QnPzlyeYoCTXGlb7/9dlnbs2dPWativdPxWFhYKGtJWmcV25yOfToed955Z1k7d+5cWati5P/4j/+4HJMi8FPs6913313WnnjiiZHL0zHsRnCn+7YTZd65/4YhR/wm3bj1Sppjimivxo07av1iqvOZzvNKxLdfrc6cOfNeT2HV/fmf/3lZ+5M/+ZOyNu748+Q///M/y9ojjzxS1nbt2jVy+cc//vFyTPpcxrg999xzZe306dNlbWZmpqwtLS2VteqdfOzYsXLMU089VdbS74n0KZDqHb+axx4Sb1UAAIAGzRQAAECDZgoAAKBBMwUAANCgmQIAAGjQTAEAADTEaPQUmVnFSg/DMJw/f37k8k7U8MWkuOEqNvPs2bPlmDTHKuJ8GIbh4MGDZa2a4+TkZDlmcXGxrJ08ebKspXP21ltvjVx+6623lmPm5ubK2vz8fFm79tr60nrhhRdGLk+R7/v37y9rKZ74uuuuK2v333//yOU33XRTOSadsxTHW21rGIbhjjvuGLk8Rd124+zH/bmDdL8k3U8rVPdSuu6780ixu+P+NET3+VeNWyuftbjSPfvss+/1FFbEG2+8Udb+6I/+qKz94z/+40pMZ6y+/vWvl7Xt27ePXJ7eZx/72McueU7/v7/7u78bubz6jMYw5Pfx7OxsWdu4cWNZq953KcZ8y5YtZe3AgQNlLb3/qznefPPN5RhYTf6ZAgAAaNBMAQAANGimAAAAGjRTAAAADZopAACABs0UAABAQ4xG//a3v13WqgjRYahjOFOkd4o476ri21PkdIpDnpqaKmspOrWKbU6R3u+++25ZS9HXr7zySlnbunXryOUpVnpiYqKspdj0FJl9/PjxkcvXrVtXjjl8+HBZu+uuu8ragw8+WNa+9rWvjVw+PT1djtmwYUNZS9G0KY73137t10Yu/6u/+qtyTJKir1O0e7oOqlq6X7qfQljNiPYkPZPGHS+etpWOVXX803lZiWft1aqKsB6GYfiLv/iLsvb
TP/3TKzGdsUkR1o8++ujqTWQFpAjur3zlKyOXV+/OYcjvn5mZmbJ26NChsvblL3955PJnnnmmHHP33XeXtfSJkPS+u/HGG0cuT3Hq6Z379NNPl7V0Xb322msjl4tGZ63wzxQAAECDZgoAAKBBMwUAANCgmQIAAGjQTAEAADTENL833nijrKUEmCqVLY05depUWUtpVkmVpPP+97+/HJMSzVIa4a5du8palV6XkuHSPG644YayltJ+du7cOXL5iRMnyjFbtmwpa+m8pHP9gz/4g2WtcuHChbKW9jml11UJQbt37y7HpFSn5KGHHiprTzzxxMjlr7/+ejlmx44dZe3IkSNlLaVEpuTGSkqxPH/+fFlL5yWpkujSfnWfHyuRENiR5lHtdzoe6V5ieVIy3K//+q+XtX379o1c/tGPfvRSpzQWVRLuMFzZaZBVauxjjz1Wjjl48GBZu+eee8raiy++WNb+8i//sqxV0jlL80jPx9tuu23k8pSInKTEwfROSGm+sBb4ZwoAAKBBMwUAANCgmQIAAGjQTAEAADRopgAAABo0UwAAAA0xGj3FW6fo7jNnzoxcfuONN5Zj5ufny9rCwkJZSzGtVdz6fffdt+wxwzAMBw4cKGuzs7NlrYoXT9HRaX1pjukYVzHWKRo9RV+nWhWPPwz1dZWuqaNHj5a1KgJ/GIbhjjvuKGvVNXfo0KFyTIrAT5Hk119/fVn7/d///ZHLf/d3f7cc8w//8A9lbWpqqqx1YraTFLOdov/TtZ+uq0qK9+3s18XWuVZUz790XtZK5PuV7rvf/W5Z+63f+q2Ryx9++OFyTHqmjtuHPvShsvbBD36wrKXPVFzO0mdR3nrrrdY60ztt3Ou75pprylp6Tt98880jl6ffQl379+8vaxs3bhz79mCc1v6vBQAAgDVIMwUAANCgmQIAAGjQTAEAADRopgAAABo0UwAAAA0xg3hycrKspbjyKvZ4/fr15ZgUb51iflPt9OnTI5enKPB77rmnrKWo5Oeff76sTUxMjFx++PDhckyKfN+9e3dZS8djcXGxrFWquQ9DjjJ/3/veV9aq6+PcuXPlmFRLsfop5nznzp0jl//Hf/xHOebuu+8ua+n6ePnll8vab/zGb4xc/vWvf70c841vfKOs/dAP/VBZS9d+uj6qc532OV0fKeY5zaPaXtpWilpP99nlEI1eSXHwaZ9ZHV/5yldGLv+pn/qpcswXv/jFlZrOslTPq2EYhi984QurOJPVk367dN6rwzD++zBFnKffDJ3nXFrfStizZ8+qbg+W6/L9tQAAAPAe0kwBAAA0aKYAAAAaNFMAAAANmikAAIAGzRQAAEBDjEZPccPvvPNOWZubm1v2mL1795a1boRoFQ984MCBcsz9999f1u69996y9vbbb5e1Y8eOjVw+Oztbjjlz5kxZO3r0aFnbvn17WauO4yuvvFKOqWLMhyFHTqdI9enp6ZHLqyj7YRiGs2fPlrWHHnqorL3wwgtlbcuWLSOXb9u2rRyTosW7MeGPP/74yOXV/IZhGL70pS+VtTfeeKOsdeLPh6G+Vk+ePFmOSddAivDvRplXUkz4uCPEUzxxdx7jtprbYnmee+6593oKF/X5z3++rP3kT/5kWXvkkUdWYjojTU1NlbX0KY1K+nRL+gxIUn2ao2vfvn1l7c477xzrttLxhauRf6YAAAAaNFMAAAANmikAAIAGzRQAAECDZgoAAKBBMwUAANAQo9FThHiKAH7rrbdGLr/11lvLMVu3bi1rKXo0xRdXEcApHvrgwYNl7YEHHihrb775Zln76le/OnJ5ioNPMdsf+chHylqKaK/2bf369eWYdKxSxHKKzK7i1tM8UrRrmseOHTvKWhXBfdddd5VjknSdpvNZxb7fcccd5Zh/+7d/K2uvvfZaWfvQhz5U1tJxrGLrU2RwiiBet25dWUufUKik50CKaE/nLB2P6r5I0fnpUwiTk5NlrfOMS/Hy3Shn/qd0TtMz8JZbbhm5fNxx2avtd37nd8paOh7VMyv9Bvn0pz9d1n7sx36srP3BH/xBWau2l36fbNy4sawlt912W1mrPouRni8pGn3//v3/+4kBy+afKQAAgAbNFAAAQINmCgAAoEEzBQAA0KCZAgAAaIhpfinB5o033lj2uG3btpVjUmLV0tJSWUspaSmNq5JS+R5//PGyNjc3V9Z+6Zd+aeTy5557rhyT9isloSWHDh0auXz79u3lmJRUVKXQDcMwLCwslLXp6emRy5988slyTErlS2ltaR7pmhu3ToLaiy++WNZSEuRDDz1U1l566aWylhLgNm/ePHJ5SujqJv1dc801Za2T9FelRw5DTiVN91mVPJnSNKuU02HI92Dnfk/HKZ1nlifda3v27Clr11133cjlH/vYxy55Tu+l9Fz68pe/XNaqpNnvfOc75ZgqEXEYhuHHf/zHy1pKtvvTP/3TkctTEnF6NyXp+vjc5z43cvnDDz9cjrnpppvKWvXOBcbDP1MAAAANmikAAIAGzRQAAECDZgoAAKBBMwUAANCgmQIAAGiIGbkPPPBAWXv66afL2qlTp0YuT3HfKTo6RZx34q07kenDMAzf//73W+NefvnlkctTpPeWLVvK2pEjR8paFWE9DMNwzz33jFyeItpTdHSaf4qBvv3220cuv+OOO8oxyfHjx8tainavpOujE3F+MdV9kWLHP/CBD5S1F154oaylCP8UwV1FzKdY4MOHD5e1Klp8GIbhwoULZa2K/E5zT+tLEe2Li4tlrZp/uifSdZXiytMnCKrnRFpfmiPLk+7DFM9dXa/dZ+DlYN++fcuufepTnxr7PD772c+Wtd27d49cnp6b1ZhL8RM/8RMjl585c6Yck6LigZXlnykAAIAGzRQAAECDZgoAAKBBMwUAANCgmQIAAGjQTAEAADTEaPQUN5xqVRz10tJSOaYTcT4MOfa4kuKtO+sbhhz7XsVKX3PNNeWYFG1cxUMPwzCcPn26rFXnJcX7njt3rqylOaZY6e9973sjl6fo+a1bt5a1FLOd5li5HKKjX3vttbJ26NChsvbggw+WtbfffrusvfHGGyOXp3tpz549Za26BoZhGKamppZdS9HF6dmStpX2rYooTvdm99MQ6do/evToyOXp2HfuCUZLn7D48Ic/XNaqZ//k5OQlz4m++++//72ewjAMw/CRj3xk5PL0Xr355ptXajrARfhnCgAAoEEzBQAA0KCZAgAAaNBMAQAANGimAAAAGjRTAAAADTEjd8OGDWVt27ZtZa2KIk6RvN1I8hR7/O677y57fSmuPM0xjevMI0Ulp4jldDyq45+ixTvx0MOQ9/nUqVMjl6frbX5+vrWtdKwqaZ+TtK1Uq+af5nH8+PGy9pnPfKasPfbYY2Utnc/3v//9yx6T7vdqfcOQ962K6t+0aVM5JsWmpyjzNP/qfKao9eTEiRNlbWZmZtm1bhw8y5OenakGSfU5hCoyfRjy8+r8+fNlzXUKl84/UwAAAA2aKQAAgAbNFAAAQINmCgAAoEEzBQAA0KCZAgAAaIjR6JOTk2Vten
p62RtLMcQpyrcbf16NS+vrRGlfbB7VOrv71YnZHoY6OnXdunWt9aVI1RQjX80/7Ve6dhYXF8taN769kuLb0/lM0fkdS0tLZe25554raykmfP/+/WWtOjdpfel4pHM2Oztb1iYmJkYuP3bs2LLHXGxbKU64ejambaVaupfSMa6ug7RfjE/3MwqQVO/q9AmIhYWFslZ9jmQY8rskbQ/4b/6ZAgAAaNBMAQAANGimAAAAGjRTAAAADZopAACABs0UAABAQ4xGT5GZqdaJAu/Gpo9bmkeKwe3MMW2rG3+exlVzTPuV4vHT/Ofn58taFZue9ittK0nrrPY7HcPusUrXRydeOY05fvx4WfvRH/3RsvbKK6+UtZMnT45c3r1Ok3Suq2j6vXv3lmMOHDhQ1tL8U1x5NS6dlxS1nsZt2bKlrL322msjl1fRyoxX97kESXX/pudE+txEeh+fPXt22evcvn17OeZK9vbbb5e1rVu3ruJMWGv8MwUAANCgmQIAAGjQTAEAADRopgAAABo0UwAAAA0x8imlcaU0v06K12om9iWdZLVh6KXGpW11E/uS6hin9U1MTJS1KgclvDwAABJGSURBVJVvGIZhamqqrC0sLJS1SjcJMqnGda+BcY9L60vHPiW5vfrqq2Xt0UcfLWsf/vCHRy6fmZkpxxw7dqysdVMA03OncvPNN5e1ubm5spZSrqr5p2Of0gHT+UxmZ2fHuj6Wp3M9Qtc111xT1tI9f+7cuda4KoE0jdm5c2dZu9x1foekc8aVwz9TAAAADZopAACABs0UAABAg2YKAACgQTMFAADQoJkCAABoaEejpzjqKjYzxQavphRvmWrd6Otx68Swp3Ep/jzFeq5bt66spejUxcXFkcvn5+fLMd1z1rlO0zHsXh8pQryK2e58YmAY8j6/+eabZe0LX/jCsrf10ksvlbUNGzaUtRQ7np4TnRj86nobhnztb926taxV1066hquY4WHInwtItY0bN45c3r12WJ50vmHc0jM1vavTczPFple19ExK78hdu3aVtcvBtm3bytrhw4dHLr/c95n/Hf9MAQAANGimAAAAGjRTAAAADZopAACABs0UAABAg2YKAACgIWaVp9jXFH1dxfKulbjetRJxnqR40bUiRbGmCNcqpjVdbyneOsW+dmLk07aqGPO0vouNW79+/bLXl6TzkuK503F8+OGHRy5/6KGHyjGbNm0qa3Nzc2UtqY5j937pRK0nU1NTZS2dl82bN5e1dB1cDs+yK1mK+IdxS5+NmJ6eLmunTp0qa+kdWT2n07u6++mWnTt3lrXLQfrNw5XPP1MAAAANmikAAIAGzRQAAECDZgoAAKBBMwUAANCgmQIAAGiI0egnT54sa6dPny5rVTxwiodOtW6keic2eCWihqt1pm2lfU4Ry+PWPfYpwnVmZmbk8qWlpXLMwsJCWUtzTOMqKWY7RcJ2o9Gr7W3cuLEck/Y5zT/N4+WXXy5rv/IrvzJyeXoOnDhxoqylCPHuMe6MWYnnTiXFsKdzlu73av5r5TMUV7pz586VtRSbnmKsoWPr1q1lLUWjp1r1CZz0jE7Pue6zeMeOHWVtrag+b3HkyJFyzOUeB89/888UAABAg2YKAACgQTMFAADQoJkCAABo0EwBAAA0aKYAAAAaYjR6ir88f/58WauiMScmJsoxKRo4xWmmcePe1lrR2eeV0J1HdR2kuOAUm17Ftw7DMCwuLi57XIqVTttK90uKt66uuTRm3bp1Za0r3dPVvv3Zn/1ZOebTn/50WUvR6OmcVbpxvF2da7/7KYQUGVxJx0Ns+uqYm5sra6LRGbf0nNi+fXtZS5+3qGrpkyPpkwBJmn/1m2HLli2tba2m9NuFK4d/pgAAABo0UwAAAA2aKQAAgAbNFAAAQINmCgAAoCGm+U1OTpa1TsJUSulK20o6SV2rnYbXmWM6vt10w9U8Vp15rF+/vhzTTexLSTrVHLtJaCnhKB37qnbttfXtmdaXUjO7XnjhhZHLv/jFL5Zj0jWcjnFKFayug5R8mM7ZWknGTNIcq31Lx16a3/hs3LixrKXnGaymzZs3l7VO0l9616Xnd0oO7KT5pVTbDRs2lLXVtHfv3rL24osvlrXbb799JabDCvHPFAAAQINmCgAAoEEzBQAA0KCZAgAAaNBMAQAANGimAAAAGmI0+szMTFlLkZSVFPGbIqxT7HHHSkSLd6LiO3HZl1JbTWkeVTRzGjM9PV3W0rWTYtOreO4U2929PlK0exUlm6LR0/WW7pduhPjZs2dHLt+5c2c55q//+q/L2o033ljWUozviRMnRi7vRsWn66NzT3dj2NP80/XYIRp9fDZt2lTW0vsT1ordu3eXtTNnzoxcPj8/X47pfsak2tYwDMObb745cnmKP7/11lvL2lqRjgeXF/9MAQAANGimAAAAGjRTAAAADZopAACABs0UAABAg2YKAACgIUajz87OlrUUCduJe0yxwd2o6s6YThzyauvs81rSmX+KCU8RxOnaqSLV0/wWFhbKWhqX4qirKNkUP5vivlNUfBqXVPv2/PPPl2MefPDBstaNAj958uTI5d3Y8W5UfCU9P9J+pW11jlX3ecrydD4RAmtJegbecMMNI5fPzc2VY9Lvv+478ty5cyOXHzt2rByTfr9u3769rK2m2267raw988wzZe2DH/zgSkyHS7D2OwcAAIA1SDMFAADQoJkCAABo0EwBAAA0aKYAAAAaNFMAAAANMRq9G8FdxQN3ooYvZVwlRQ2nba2V2PQ0/zTHcR/HcUv7la7FqampsrZ58+ayVkW4pujolYi3rmophj1FnHdrKX6+muOJEyfKMSn29V/+5V/K2q233lrWqnOdonqT7r1Uxdl3t9VVXXPpfunMHbj6VO/P3bt3l2PSJz2S9EmPSnruHz9+vKxt27atrK3Ec7qSPq1QxcEPwzCcP3++rK1fv/6S5kTP2ugOAAAALjOaKQAAgAbNFAAAQINmCgAAoEEzBQAA0KCZAgAAaIjR6N0Y6KrWje1OUZVpjlU88DXXXNPaVlcnYn41Y9i7keSrKV0f6Xym2PTZ2dmRy9M1lWKlU7RrWmdHip9NcauTk5NjnUeKWj9w4EBZ27VrV1lLcfbVuU5Rsd3n2Lh176XOuLRf474Wr2aOJVejFI2ensVJijnv3GdpfemTHlu3bl32tlbCjTfeWNaeeOKJsvbggw+uxHS4CP9MAQAANGimAAAAGjRTAAAADZopAACABs0UAABAQzvNL6WaVVYiOWvc6XspOSttq1vr6K6vSghcicS+tM5OUmFK7Oues+np6ZHLu4l9p06dKmtp/h1pnxcWFsrauNP8rr22foQcPXq0rH3yk58sa88880xZq55JGzZsKMecOXOmrHUTRqtxq5kO2HU5zPFysVbSTmGt2Lt3b1lL90t6f1bvtO7vrpT0t1bS/Hbu3FnWnn766bJWve9mZmYueU7U/DMFAADQoJkCAABo0EwBAAA0aKYAAAAaNFMAAAANmikAAICGGI2epHjdKv4yxRCnGMtu9HVnTDc2eDWj0ZNOn
PO4j++ljKt0I4g7Ee1VZPow5Njxblx5VUv3S6qleaRo9879mT6fkNZ34MCBspZia2+//faRy8+ePdtaX5Lupep4dJ9j3edOtc7uPFie1Xy2w+UgfX5jz549ZW1qaqqsVc/39NycmJgoa+ndlN5p4/7ESdddd91V1qrY9B/+4R9eqekw+GcKAACgRTMFAADQoJkCAABo0EwBAAA0aKYAAAAaNFMAAAANMRq9G2VeRUuuRMx2J9Y77Vc3orgjzSMZd5R5JwL6UmqrqRMDnSJVZ2ZmWttK6xz3sepeVxcuXChr1RzTmGuvrR8vhw4dKmuf+MQnytqjjz46cvmWLVvKMemcpUj17nGsdKPRO/fZSnzugP9prUQlw+UgxZ/v2rWrrJ06dWrk8sXFxdY8Unz75fDpiN27d5e16rMjR48eLcfs2LHjkud0tfPPFAAAQINmCgAAoEEzBQAA0KCZAgAAaNBMAQAANGimAAAAGmI0eop9TbUqLrmKTB+GHKOcpHlUccMpGjjFIXej4qvaSkSLd2I9u3Hw6dh3jtVKxNJ34qjT3NetW1fWNmzYUNY61/fCwsKyx1yKdH9Wx6QTp36xbT333HNl7Y477hi5/LbbbivHPP7442UtRfXOzc2Vtc79mY5V53naJRp9fESjw3ike2nr1q0jl6do9PTcTO/47m/RteLuu+8eufzpp58ux4hGv3T+mQIAAGjQTAEAADRopgAAABo0UwAAAA2aKQAAgAbNFAAAQEPMgEzxkSnGsoqWXFpaKsekWOxxR3CnaOCJiYmylnRizjsx5hfb1rilOaZ466Q6nysRB9+JW0/7leaYYtOTKlI9XdvpXupK26uk45vmmLZ1/PjxsnbfffeNXP6Hf/iH5ZgvfelLZe1v//Zvy9r1119f1s6ePVvWKmmf03Ec9/0uGn18HEt470xOTrZqV7LNmzePXL5x48ZyTPoN5Rn3v+OfKQAAgAbNFAAAQINmCgAAoEEzBQAA0KCZAgAAaIhpfinFIyXsVePSmJSgllKwLly4UNaqVMGuTtrZSuimAFbHOJ3ndF6657PaXjq+aZ+7tU7SX5KOYydZKB3fdN2nFL1uAmOle+zTPBYWFsrak08+OXL5z/7sz5Zj0nm58847y9r58+fLWpXm103lS7VuCiArT9IVcDm4++67y9qxY8fK2o4dO1ZiOlectdEdAAAAXGY0UwAAAA2aKQAAgAbNFAAAQINmCgAAoEEzBQAA0NDODk+xx1WUbzdGOcVAT0xMlLUqNrgT634x447I7cafp3HjnmM3Nr06nynKfiXiz8cdK925J4ZhGNatWzdyeTqGaVvz8/NlLd1Lqda5drr3dDpWR48eHbk8xan/9m//dln7vd/7vbL2m7/5m2Vt//79I5enY9+9NzvHPl3b3WcL/5NjCVwO0u+rzqdb+H/5ZwoAAKBBMwUAANCgmQIAAGjQTAEAADRopgAAABo0UwAAAA3taPQU19uJnO6uL0UsV1GQKXI6STG43XV2ttWNOO+cl27seFIdq25cdnce1bhuDHuytLRU1qpY0m40eoo5TdfOuPe7e6zS+VxcXBy5/KmnnirH/Oqv/mpZe/jhh8taFVk/DPUc0/FNnxLoRqNX98W4Y/8ZLUXhA1wOZmdn3+spXPb8MwUAANCgmQIAAGjQTAEAADRopgAAABo0UwAAAA2aKQAAgIYYjV7FEA9DHTs+DHWkc1pfilFOsdgpTrtaZ4qcTvvVjQmv5j/uePnuuHQ8VmIe1X53Y7uTTtx3irBO+9WNzu9cp13ddVb32cLCQmtbmzZtKmtpndU8Xn/99XLM5s2bW7WDBw+WtbNnz45cnq6B9GxJ0flpXCeinfE5cuTIez0FYA07f/58q7Z169axzuPMmTNlLb1/JiYmytrMzMwlzelK4p8pAACABs0UAABAg2YKAACgQTMFAADQoJkCAABo0EwBAAA0xGj073//+2UtxSyuX79+5PIUv5jiqFM0elLFKKd5pDjnbnR3FV+8Etvq1sY5Zhjy+az2LZ3n7n6lKPOq1o0/7x6rSppHiivtfkpg3bp1ZW1+fn7k8vSMSNf3c889V9Z+5Ed+pKydOnVq5PJ/+qd/Kse8/PLLZS0dq4ceeqisVefmW9/6VjnmwQcfLGuHDh0qaym+vTpn1fkahvFfp1ez559/vqy99NJLZe22225biekAa8yLL75Y1t5+++2ydtNNN5W1ffv2lbXqsx3f/OY3yzGTk5NlLf0uSL9Dqmj3HTt2lGOmp6fL2tzcXFk7d+5cWbvuuuvK2jj5ZwoAAKBBMwUAANCgmQIAAGjQTAEAADRopgAAABpimt+BAwfK2oYNG8paleaXUkFSyliqJZ1xKd1ramqqNY9KN4UuzTGlAFbHo5vu1R2X5lhJ6YBd407z6+xXWmc3xTKl6KVauj+fffbZkcurxJ5hyElFO3fuLGsptadK4rz11lvLMddff31ZS6mC//zP/7zsde7du7cck66dt956q6ylc3bLLbeMXH7ixIlyzLXXxsc+y/D000+Xta997Wtl7Rd+4RdGLk/JjSuhuibTMzBdj3C1qpJmX3311XLM0aNHy1pKytu0aVNZq9IDn3nmmXJM9Zt9GPJvr/SOr34P3X777eWYz33uc2XtX//1X8vak08+WdZ+7ud+buTy3bt3l2MOHz5c1nbt2jVyuX+mAAAAGjRTAAAADZopAACABs0UAABAg2YKAACgQTMFAADQEDNyU2xjihWcn58fuXxmZqYck+IXqzjki42roiVT7Ovi4mJZm5ycLGspNr2KiExRyWm/UmR2J047HY8U992NAu+sL8XxdiPaO/Po7nOKOa32LUVYp+OR5piuj3R/VrH6KVI1xZ+n6O40/29961sjl3/2s58tx3z1q18ta+m+PX36dFnbt2/fyOU33HBDOea6664ra2+88UZZe+mll8paFdO6ZcuWckwVc8/yvfnmm2Xtb/7mb8ra7OzsyOUf//jHyzHp/ZPeW+n9WY1LY9KzLH0yJdXSOivpvdV9dnbeJd33cXrOdebRXV86jtVzv/u5miQdq+r66J7nNK57n1WfSUifGErPjzTH6jf2MAzDY489tux5pPdg+i2a5l+9P1NEe3pvPf7442XtkUceKWvVsfrlX/7lcsw3v/nNsvb5z39+5HL/TAEAADRopgAAABo0UwAAAA2aKQAAgAbNFAAAQINmCgAAoOF9446VBgAAuBr4ZwoAAKBBMwUAANCgmQIAAGjQTAEAADRopgAAABo0UwAAAA3/BwhY6E6TVBJgAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "v1_ind=np.array([427, 208])\n", + "\n", + "fig, ax = plt.subplots(nrows=1, ncols=len(v1_ind))\n", + "fig.set_size_inches(15,5)\n", + "for v1_i, v1_ind_ in enumerate(v1_ind):\n", + " v1_k = v1_model.simple_conv_q0.weight[v1_ind_,:,:,:].numpy().mean(axis=0)\n", + " v1_k = v1_k / np.amax(np.abs(v1_k))/2+0.5\n", + " im_h=ax[v1_i].imshow(v1_k, cmap='gray')\n", + " ax[v1_i].set_xlim([0, px])\n", + " im_h.set_clim([0, 1])\n", + " ax[v1_i].set_axis_off()\n", + "plt.show()\n", + "\n", + "\n", + "fig, ax = plt.subplots(nrows=1, ncols=len(v1_ind))\n", + "fig.set_size_inches(15,15)\n", + "max_activations = np.amax(activations[im_ind].numpy())/np.sqrt(2)\n", + "for v1_i, v1_ind_ in enumerate(v1_ind):\n", + " v1_im = activations[im_ind,v1_ind_].numpy()\n", + " v1_im = v1_im / max_activations\n", + " im_h=ax[v1_i].imshow(v1_im, cmap='gray')\n", + " im_h.set_clim([0, 1])\n", + " ax[v1_i].set_axis_off()\n", + "plt.show()\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1IAAAM9CAYAAACWhEKIAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOx9WW8kWXbex9y5FItVrOrqfbp7unt6ZiSNLGuxDVg2YNnP/h2CnvRseHoGhgH/Df8DPcqAXwwIgkeQBpIsaTTjmV6ruxaSxeKaCzPph8R388sTJ4IRmZEk2zrfS5CZkRF3Pffe851l7fLyEoFAIBAIBAKBQCAQKI/GTRcgEAgEAoFAIBAIBL5piINUIBAIBAKBQCAQCFREHKQCgUAgEAgEAoFAoCLiIBUIBAKBQCAQCAQCFREHqUAgEAgEAoFAIBCoiFbRlz/60Y8uAWBtbS19xr8nkwkAYDweYzgcAkC6jsfjdF+n0wEArK+vAwC63S5arfnXjsfjdOVzGU2wbFTBjz/+eO3qu4Af//jH6YF8L8vNMm9vb+P+/fsAgMFgAAD4x3/8R3zxxRcAgEePHgEAfvd3fzf9/9lnn6X7AKDf7wMAdnd3sbOzM1eG4XCY3k3w3drWP/zhD0vVif2k8NpNn13m/qLfVb2fWKZOZWHLVrb8ReUuQtk6ffzxx5mXeu+0n62treWWrdFopDnjwZtHeX2nn5edTz/84Q8v+duiOdtoTHU2zWYzXbvdLoCZjNAr7yM4XwaDAc7PzwEAJycn6Xp6egoA6buLi4tUFr77v//3/35lnbw+Iq4aH0X1t/O7aEzWLfOWmUsWq4ryyvaoOpe8tUnB8nI8XFxcZGQv16Nut5vGZLvdnrtH1zk+S5/j9e+idaobi8q1IpSt0x//8R9fWadGo5HKyLmq7VfmOyKvrmXaoM71dpk1Z9H7vHvqHHt1jKNl5EeV+VRG5umaVeX9dd9/HXui68Y/tToFIxUIBAKBQCAQCAQCFVHISCmKNKo81VuGZ+5FovVTzZLiKq32qkHN/mQySWWkhrLVaqV6URNOzWSv18Pm5iaAmdZsNBqle1gXatlXoSEMTHGb86Ityj6pxtYDx5zH5tr5tCiTmAfvXV75OP897b9lppSN4hzjfDo7O8PLly8BAEdHRwCA4+NjnJ2dAcjKnlarhV6vV7leV8GTXfZvXotYk7w2v63jmOWtu3xVn+e1qdeWajnBK8cUwfHWaDQSE8Wx6JXLPpO/1etNy/ibfj/BuTcej1N7WTnlMXtAfpvmMVj29/pZ3vhaVTtdXl66c32V89qTN6t4vmIZlqZqW9TVdiqfrTWHvqNoTBXhtsy9fyqoe0+zCCofpAgr5IB58xs9lAAzUwk9pNiDhQ7sVS3YClsn7yClCys3c8fHxwBmZn/dbhdbW1sAZnVSc0fW4bYssoQn2LX98zYot3WTB/h1qmOBqXORumoT6I0Tvt8K/slkkplr9mpRtPmoCu+AZjc5rVYrzSMemnq9Xtpk8Ts1+eX8oYksD00vXrzAixcvACAdqE5PT9PcJPjMdrudMSVeFnmmlrYtdIOom3W9eigyw7zNsuO2QNcOO29Go1E6SNk+aLVamcO9HsAInV/W1L2oX1eJogN6FZPuuvuUJvL9fj/NUVUy8urJN7sXKDpIeTLTU1zaZ93UGLbv1XFTpIi5bfDkfllUdRlYFnaMeWunlss7rBft4W5zP/3/jKK5fF3rZ5j2BQKBQCAQCAQCgUBFVFbVWo2baod4qh8OhxnHXGqINzY20m+tllYZqVVpJpQdymOkVPuoGnR+Tu04zYna7Ta2t7fn7vfYOWXi2C5W63Yd8JgITzNDTb69fzKZ3IjWrM4AEZ4Wo0xdlqlvWS0xx45q0KxmXK/2fi2j1dg2m82MqWmZMuXB00jyuRw/nU4nzQsGnen1esl8ivdx7A0GgxQ84vDwEADw/PlzAMDBwUFipMgMKxtFlkuvZIvrgjJSalZp5Yc1IQOyMk/7g/evra1l5KCO11XPtTJMZZEpo/esVaGIOQey5uaDwSB9xjWJv+t2u2l94tjk2FLLAp17/G3drGcZePLEW5/zoIxamWA0i+DNN98EMA0Cw0AwZJlp0TEYDDL7BWWpPCYhr8+bzWbG1FnbwGPjq47RqmuAx5DaZ2g5bH0vLy9d6506UQc7VLfp3zJl8X7L9tRgMZ65qV3HVGbnMb+3kY26DSZvdaHMnCszN1a1fgYjFQgEAoFAIBAIBAIVUVmN5tmPKqMETLV4ZGuodaLGbmtrK91nGY/Ly8uMxv06bPEto6baMGrSlUlTJ3dgqrG4c+cOgJnGXRkp
rQvgawmvk9nx3q8+BCwH+8ezNfeYw2+almMZLKLZKNL6qrbVMhqj0cj1K+D9duxoYAdq3b3QwXVoZwmVAxpQApiyQspE8TuOK2WyganvE1mnZ8+ezV339/fTvOP9jUYjPZ/1ZfCXu3fvZtIPlMFVIcxZR9ZZgxjo3OezLPtm20qfrwEzrAZ7ER+csrLFk092jKh21/MF03vynl838uSqhjrn/On3+xmmgn2wvr6e2EuOI7Iop6enGRl5cXGR8ffVMq2ivnkslC2H54tXxkfkqrW26lr8zjvvAJi2O9uSewNePbbK86ni1fN9I9S/mSjLUi2DIm24yg3Liih7z3Go+yhbJ52P1hKh7vFWxjfaCzKk5bH7xWUtSKpCrZzs2qnrqhfwRP1seb2t/u6KuoNyKG6ivnn7pryy6OdFflN11iUYqUAgEAgEAoFAIBCoiIUZKdXmWcZCGSmrAbpz5w7u3buXeQbgM1Kq+awbeRqXi4uLTLTBjY2NVE+GP6f/xuXlZfKRokaTv1fNmhd1yKJuG86roqlZ23RqBBuNRiY5JX83GAwKo7UFfHhhV+14Vy0Z+yZPcwbMhxa3kfG63W5iQjQyXpUoXldBNeHe+4Hp3CFjxPI0Go1UF2qjOZ+ePXuGJ0+eAACePn0KYOYjdXR0lNqF9d7c3EzPJ/u0u7ubrpybVVDUDlpXyoTxeJzRnGtyYDJxWn9g1kbAvPzMK8eyvgJFWjzPn84yHMRkMpnz6QLmkyB70ZJWoQn0GBqdZywT2cF+v5++t1EjNzc3k2UBx5bONxuKfzweZ1iEuuH5ZNh+UjbDXrXdPc289dX1sEzd3njjDQDTNrOROJWhok8kr2dnZ3OMFTDvU2XlobIz1gIkj6UCrk5oXhU69ixrob7RtvyaDoJQ+WH9TvW51+WvfJX/UhG7af2OdA+0ivIWrbVs+8FgkMaiF2HVsk+6dnos+KojYN4GXIePblks6zdV59516YOU5t7QAWipemJ7ezvzmQoHK2g8h+26UHSQ4nvVJJGbILvxG41G6QDFhZjtowcp4qq8QHXCGywaBMCaEdBsqt1up7pww6GbRiuA1LzktsDbzFkz1OsUdrro55kbDIdD13zPmstxLGoYcZqf6qa9yIzTmqJ5OXaugh7i1JQPmJm5rq+vZw4Qo9EobZpoxsdD01dffYWvvvpq7jM156O84Xvu3LmDBw8eAAAePXoEAHjllVcATA9WapZ7FXRc2LGhmwDWR4MSUC6wPalw0T5lWfQgZc3C9ODsbfLs/VVQZvHh8zudTiavkm5ONYADMJP1alK3KkWLbmjyzA/1cKsmY/Z+jtPt7e10EOczVCFo66sbdE/W1GnGpOuuVZxoMBcvlYANuKHrnJWD3rhfBiwXTW2BrBn5YDDImP2dnp6m+WMPWScnJ3P9qXXTueaZMBI6D1axbnlBmzqdzpycB2Z9s7W1lWQD76cLgY5ZjtVGo5ExrdXxv4oAGp6pnsIeVEajUUbxpwEcrJKmTnNYPdx5ykpg2q4qswibz1APVJ6igvBk3nUfpq7jfdfphmLfU8ZUzwvcUvR77xlVcbt2voFAIBAIBAKBQCDwDcDCMVtVi2Cd8tbW1tKpn1olami2t7eTZsmyPo1GI/2Onw2Hw5WH/LQaXmWk+N3m5mbSgFMjub+/D2DKUFGbRI0my392djbndM5nWi1Y3ad8T9PIZ1Pjsr6+nnGMJ8vW7XaTVp/aRPavhgLWPqyzDt6zvHHgaSMIq5lst9up7oRqBj0zmqJ3V0WR1pfjQbXLHHvtdjtpItkXZEA3NzczIZtVk6ZaX2BeC8frMqYtfGe3280wUcqM2UAKx8fHaf58/fXXAIAvv/wyXWnKx6S77MtOp5OYUs611157LZkQvfrqqwBmiUA3NzcXCk2tJiH6GTBv9sq6XlxcZBgpyrnj4+PU/jYUe6/XS/OK0GATXt/UoUH35pcNWKDhwHnVxOqsE2U8f392dpYx91ml43Kelvzi4iKVkX1zfn6emV+cUzs7O7h79y6Amdb64OAg8yxegWzY82XMla5iooBp+1tGVllpy/yOx+NUXsuCXFxcZCw/PDZjmTqx/ba2ttJc8QI4eWyBBvoAZuPs5ORk7m+95/z8PLFaaiJt2XeVxZY1qYK8NlGrA2U+7XpL1ml7ezuNQ1739vZSnQjKj263m7EeqGP9vYpBtoEYOp1Ohp1hubQvWF/PZcOGGfewTLAJy9ypxYcd/14gId0vcQzbee8FG1Im9LaY+XnBP27COmdR6D7M2+MC03FkmUOvTzwsOoeCkQoEAoFAIBAIBAKBiliYkVI2x9rRa5hIq5G4c+dO0sKo5pW/4zM0bPqqT/X29DkejzNlU0dkah6obTs+Pk5aNmrJ+f9wOJyzqQem9bwufyJP26Z266wntV5kAjY2NtJvWW/+7uTkJKPdUWayTNjUuuE5f9owu1tbW6l/1N8LmGov8xjKPFStlz5f/VD0nZ1OJ2mX1e+OmnIGTlD/NdU+AzOm6eTkJPkWcQyORqOMZl21clXrVBTinOPh8vIyo4F9/vx58oMiE/XFF18AmAabIBPFOmmbkCn91re+BQB4++238frrrwNA8pWiVrfValXSOHu29YQGzmG9NbgMteK8j22/v7+f5hcD7fCebreb2o04PT3N+N6odrvO+eQ9S9kPZWuAGQPXbDbTOGNfqWaabaEhq+uEx6Sp/xYwz8ZowAK7XnEu3bt3L80vMh3EcDjMBD3w5ERdDvReYCBe1T8SmMpqy7JpSgGVB8A8e2NZDfV19UKoV63X3//93wOYti3HkA3MtLGxMaf15z3Wt4v1ODs7S3NLWV/W0dbz7OwskwRY2blFGSkvoIT6n/K5Wl/rd8O0Dnfu3EltwHYiU68pVBhER8eBDVxRl69RkX8J/261Wmn+8KrlYr/YJOrn5+eubMsLK15Vfnhlzbsqms1mKj9lH/tP/W3tmqv7PA3gdZMBKLx6qr8eMK2vZYOvmg+r8o3y/EyLymD9+z3mlmOS806tzYrGQVVmqvAgVdTpKuitqUG3251zKgdmQm1/fz9NKgo8bn7UuVkDWKzaqc0uWsPhME0GTvKNjY00oXg/63F4eJg6ikKQE84LzJAXBKBOFJn26UFKFxtgdpBSEyTWm4vc3t5eZhCr8+t1wr6z1+ulevKgywhwOzs7eOutt9LfwOwgyHYAVpcjQk0GrEBj225tbaX25uHp7t27mTxlehC0JjAclwcHB8lUkwvY2dlZxsRnGZMQNS/yzIqA6UaAm22Or8ePH6eD0+effw5gtqk4Pj5Ov7Ums2+++SbeffddAEjXN998M8kQjlsVnDbYSxmoyQ9lgEYnZV11nvCQyPqzX/b399P8YjAMjrt2u53qyH7wAlDopqMO81L7/9raWqa+Wk+OP5pM9nq9NI644dAyW/OJ0WhU+O5FUZSTbzQaZTbQqjCxY2tnZyfVk/drNFMbQKnX6xWaFi8K3Qh7EfpsJE6Vvfbg+PLlyyQDOAfV9M0LQGRzni1Tt5/85CcApuOGY4eHAf6vJpUq52z0T84
1VXTaIBWqPNIr9xpWudbv9+dMNReFPcDquq/zxColKQ97vV6af5QRlGmTyWQu8ATgm6B5wQ6Wgd1DqFmU1o3fcz5x/dLDsEZkBaZ7CNbJbmzrRl6kPTUBY//1er00Fjk+2R/37t3LBC7i2MlTft2kaZ+ORa0fMFPodbvdtG/Q4GmAryi4Tnhmpt65g/XkHH/+/HmqJ03+Hz58CGDazzaKY5FysmzgljDtCwQCgUAgEAgEAoGKKG3al0eLqimAOptT42JN/F68eJGYAg0PDEw1GkWMVN0O/545GKE5fICpNomaCpaNGr6Dg4P0XN6jJj82xKZHYavmos7Tv9bXOk9ubm6murCMdHBVDTK1aNQ8dzqdTAhbdZi/LtM+L2iDmpiRiSLjsb+/jx/84AcApgEKgJlGaW9vLxPopG7WUDW9NlgBr3fu3Eljh/MJyOYuUy0S5xODN6jJKeedal/sONBy2cAHV0HDLltnYzX9ouZVzfj4N0Ocs46Xl5dprFkzvvfffx/vvfcegKlJHzDVNvF+y4RrcI0q0GATXn4UthP7rd1up37ToCzAlKmyZnAcd2tra6kNlbG2jJSaFdaR98aa1XjO3yoDWCdrxgPM58Li7yzr6Wlnl5FzRaFu1exYw2KzHJo3CphnfnXO6e/Oz88zcnwymeQyR8siT4uu2lMthw1wQlZ6b28vyXTOL/6u0Whkgtjou5YJs0/89Kc/BTAdL2QqLDO1u7ub2Bd+du/ePZelAvwgKLxXTTA1SAXrbtmqk5OTJCPLosgcVoMQ2LDt7XY7acbZBmRlPvnkkyQbNIgOMJUHlO181uXlZe465a2LV8HbW3mmsmpeCUzlGN/POlEuv/vuu4nN4XdqVst1R83P62LVPPlgc3GpZZAGnuEYZPvzuru7m8agTXGhLihqKl+UTmRVJtqeHGT9yER98MEHAKbzh+swx6KuTTa10U3BrhmtVivJEwaYYlm/+uor/OpXvwIwk4MfffQRgOlewTtPLHu2CEYqEAgEAoFAIBAIBCqicrAJz0FLw+UC8yGZefrXhK88JVqb7e3t7YzWQB3zV2VvajUW6hxPjWS73U5aL2otqWV//vx5uo/aM56WG41Gxqmfn+u764Zn30xoyGrWWbXnwHw4cy8EstV6LOu7UVSPvHC8HmvQbreTdpN9QG3e559/nv7md9Q+ffrppxlftkXCZhdBw7xae2pqro6PjzNBC/r9fuoX1TQD07GnDBQwH8SFfc2+Uzt9QjWbVTXqqqmyTuGc38+ePUuBJegXpSHO1T8NmM4hagHpB0UN2vvvvz/HRLFOHBN23p6enqZ2qQprS+0xEeoj5jGJwDyboc7WfJaGuQem/aH+WIq6/R+0jtZHqt1uJ42rDe3e7XYz7KXWkX2qvkaL+KpdBW0fK5M0sbFaA2giZ2AmCzY2NtL41xDOwHwaC9UyWyZKGaQ6mEOvbpZlGw6Hc4FNgBnL++zZsyQzeD/HlgbfsQm/6wKtAVqtVmp3z4mfjAVl94MHD1yWivdroApg3tfU9q+X8FfDpy8qH5T58QJ0cLxoCgfWhcFx2BdffvklHj9+PPcZ2ZydnZ3EGrDco9Eo1TNPViwC7xlewlyOpWfPniW/J7Yx++add95JTADlOPv88vJyLiAXML9HsQEPFpV7HiPlBcXi+Nne3k5WEGx/+tns7u6mNtd9IcurKRaAebYuL5XGKuBZG7B+ZG++973vAZjOH5aXgWE4HzQo2nX4SpVhhXQtZl+wLr/zO78DYNpvf/InfwJgtkfn/qPT6aTxWRTev2r/BCMVCAQCgUAgEAgEAhWxtI+U2pzzurGxkdEY8UTb7/eThob+HhpFyIaDVu39qk7Dnq+UTWTaaDQyNtvUEj1//jxpYG3EtVarlbQXGh2oKBLSqmxnvag1/MyGwFStq2UJvSRuNxGVBpgPN8tycOxRo8Rx+eTJk9Rn6pcHTFkEy4xcxYZVhdp/s7zWh2k4HKZycJ7s7+8nDTOZKGWhWBf2K+247969mzS97MONjY1MUl/186lqC63tT80W5zXL+tVXXyUmiiF9NZId38k58/rrryc/qA8//BDAlIkCpr5SZKI0gpLVBpLBe/nyZfq7ClTbzL7Rq/Un6nQ6GX83Tf5sQ7TyOhwO03PVz4LPrdtPL4+p1oSSqiG20QnZR8pssozq92cTxA4GgwzDV7fMsM9XRorf9Xq9NG5Yftat0+lk+kn9XGxKDC9UOFHXWuWNQRuJ8OjoKM01zi9qYPf29tI8YxlZb/Xr0LXBrrfLRO1TX0VqullWDePOPuD6ee/ePZel4pV/k6XStdnOv263m2Hm+Z5+v5+R+2Xh+SKp/w37jHL8+Pg4le3NN9+cK/8vf/nLxN6xndhPDx48SM9V1p/zzfZXXfPKRnT0fGgHg0Fai8hMkdn4q7/6q1TP7373uwCA73znOwCma7P1o/fevag/pf7ermlaLxvNzmNH1ZeP40jlNzDtMxvlEshaUV3nPkmZONaP84R1U+bcJrne3NzMpOa4DnhWGDZKdL/fT3sisp3/4l/8CwDA97///bRH+NM//VMA89Y6XOe8/sorw1UodZDSzvccrwk9SNl8N5w05+fnqaNUwADTxrGVW9VBquiAsba2lnGYv7y8THXhYGQbHBwcpM0aByjv0QzkuqjX7ZycB0+46obDCio1L7KHLDVtss+/ifCYCpZNN2w0W2HI808//TRRvQxEwXGpGdpXBd2gcizwM252jo6O0sLE697eXiYPB3+/traWcbjmGHz48GFmw6EbDQ2fvCg03LLmiAJmbfz48eP0Nw9Zw+Fw7sAHzEwovv3tb2cOUDTne/DgQfqdBrKxwTjYdi9fvszkBCqCN549512bxV7DEdsDVa/XS/2r45TtZjfo+n7rDF1XfiKLyWSSyqbySnNKAbP5sra2NrcYA/Pj25ZbNzN1myt6h0Je7YG32+1mFAlqzmZzLvHa7/czJr9qdl63PLfrq9aJY0fN+Ti/eIDiJuPo6Chjoq19qilLgHk56I25quOPckfNWym7NAAE5yvfvbGxkQlPz2fdv3/fPVwB082uPVxtbW1lZJ6uc9YUtwpsP+k4Y100DQLnCDd6XJt++tOfpr7jYZgBdu7du5f2HpofTM1VAcyZkdW5YdeDh+b5AqZ9YQNo/PznPwcwHYusk7p2ANPNL03oNOdf3dDxaueqymx1X7DyWwMqec8A5t0MdK7m7ZevY7+kezoNIATMlBnD4TAdgK1LgVfOm9jnee88Pz9P+wzOL/bT7/3e7+Ff/st/CWC2r/rZz34GAK55eR0B3sK0LxAIBAKBQCAQCAQqYuFgE3rS1tC8wPRkSA0KtUnU/g2Hw4x2Qh2wrXbIc4Cvm8ImVNtgGanxeJzR/LNcL168SIwBy0aNTa/XS8ybamzzzCbqDn+u2iq2l5oasp48zWsWdv6WJ34Nb20dOBuNxrVrKzSUN8fg+fl5YiX4Hanfr776KrUBHXupRQOQMVuoG+qsbhNnqnbWshfdbjdpXKnFU6reCyfM7zRkPTBP5VtWZTQaXZnV3I
Jz9+joKDm6Uzuu5oisH9+9tbWVNMdkomjO98EHHyQmiqYhrL8mrlQTSL6bV01EvEj4cy9FgQYGYf/x2RqogH2jZr5Wa6wmZNbEcZVhtbV+FnZcDIdD1/QLmI4Ztj/rq2YXnvlenuN1UULEMshjpLQdPbNzapyplb64uEh14rjm/2rqrKZONgR1nSk6tE7KRHC8sIxqvmpNhdvtdqo712LKhwcPHmTSdfR6vUzI42XkOoMqnJ2dZZLhaoJjjh29h21KjTnH2dbWViq3Vyf9G5jKeN7HOUm5qPUtiyK2muh2u2l8cQzt7+8n5o11Iev08OHD1J80g6Zc39zcTOVXVs9q15fpJ48p8cz3NUw4MF2P3nnnHQCzENNcW3/1q18lRop9zT49OTnJ7PV6vV6hudUiKGJT19bW3OBmhCaUZx34vQ1tf3Z2NhdkAphnDa3MW+V+yZpkrq2tpfdzLv3d3/1dKisZUEItKa7L4uiqfa+V451OJ+1j/+Zv/gbAzMLtiy++SHKHe3bKhKOjIzcI0LJMYTBSgUAgEAgEAoFAIFARSzNSageqiT6p4aImhRqkfr+fnmHt0c/OzjKJNVUzbDWaddnWe9pfy0gNh8OkQeHpliyOauOpxdAktpaRGo/HK/fH8RgvmzBOHaiphaAW786dO3PaI/2dhvX0/GzqZgqLtAV2bAwGg9QX7C/acH/44YdpfJGxIDRp3aocQr3kp9ZRv9FopDIW2aZzXu3s7KS/rT9is9mcC2IBTMezDXig7EHVkM1kYl+8eDGXDgCYtXG/309tq/KAbJMNcf7uu+8mlopaWdZJWQNqd58/f576nOVR35aipIgWOl8830lg3k9FWRmbnFod5/kMasjY/8pI2VQR+m6dX3WwU57Ms8/VkL5sV5XBLLcXUIi/Y108n1Tvs6pQR2TPH8E6k6vvDccU7z87O0uy2jI8Kh80oECdoae1ToRlCTWMvOdza/tibW0tyQ7LXN+/fz+ty5raos7Q+5zH5+fnuSHIdQ7wOhwO55Ih63ea4Jt9qMFQyHTzuru7m2Hr1Y+KcpOhoctCmVSbOqDVaqUyKTtOtp7v4vXtt99O7U2GgFr1RqORZAmhPo3eWr/MfLLQMUi5p75gHEPW7+ujjz5KdSETQtbt8vJyLhgP32P3kopF65T3u0ajkQlEoekFOD5ZTl1HOBbVAoJ143fe2lO3HC8K1KFX1onzRvd0LLf6gvH/Ve9TFbYuXr8pI8rxxjHDsfZnf/ZnaQxS7msS8rz36WdVx1owUoFAIBAIBAKBQCBQEZXDn9tIbprIUSM12Yg71ASdnZ1losDxBH96eprRqus7V22nqX4/lpHq9/sZW3PW8ezsLGkmqLHRBKjWL+Li4iI3MlLdoUu9NlMmkJoKy95oklNqaTWUsWWkNJT6dUE1b+pfQm0y+4J1IwMCZCNHqd+XIk9DsYhfh7aZ1ZRrPWyo3q2trQwTxXmysbGR0eyprwrrp9pd1Zjpd6PRqDIjpREG+bd9p8dQv/HGG8m2nr5RjMz3+uuvp/tYN9ZJoxpSu7a/v+8yUUDWd+EqKFNjI86p7LNservdnoukCMy05A8fPkzz3SYDHAwGidXiXPJ8kzz/qUVgf6syW5NL8l2aFBqYyYJGo5HqyzqxjsrYqcOgq+QAACAASURBVB/eKhKqe0m5le21Udo0pK+NNnh0dJTYAytD9Bmcs+12O9enY5HIaXp/UboR6wOxsbExF95d0W63E8Nto85ubW3N1cWrhy1XVXDdH41GqS2tf6jKpKtYKvssOw8PDg6SZprj8s6dO5nIfxoBkEwdwydfBV1bLRuqLJHKb5affqM2afBbb7015/MGzOTbaDRKz9IInxox1CtfXVDfULt3076w/mc7OztpH8f215Q3no9XUdlXsTeyjKKmH1F5BmAujQbHJOuzv7+f+k3XP44LL63IqiM2K1gHtf4CpuOW39l9RKvVutYyWhSxQ+12O40pyi71w+Z+nPOmyAqiDmZqYdM+QilmHSzsFAoKCi51pid0k0fB6Dkme5TlopNLTULyQg1r2fr9fqqT3cwOh8PUcayb1p/PU+GjYYRXCTVXY301NxaFGQccD1LqaEgBT8GiBynPSfMmciXohGf97CZ6a2srUzbP9GhV0M2KbT/dpOkhnOVW52i9H0DmYKSOsFZw9vv9TPvoHKvaDmoKxY0Rn8Gy9nq9NP/VlIWO1qThmfdre3s7jSc+U0M9M3Qrr4eHh6nOXt9XzY0FzDsie7+35h/NZjOzkWef3b9/P3NwZv+pqSJlhneQ0gNVnTLDO0jpeLCmKay35t7hZzq+7Yar6oG2LPTAYsduo9GYC7ICzCse1LySZeTGSBUswHwaDjWBsRuNVckR7X8NnAFM68ZxZde0Xq83Z9YIzOeNyzObr6sufKcGa7L5yobD4ZzCEvCDU3DMqeko57uaO6rpOjA9XPEAo4crYHqo5MHyD//wDyvXj+1szeZbrVbG7BCYmcjyM7b7K6+8kvqVdaLCqNlspnWCfany5iY25EV5DzVYgZWdetC3snrVa7BnqmoD1Gh9rAuK5l3jZ3r41TQ+fGae4kqffx3IU2J56YuIokAddaNoL6+BKLzAJzaNha41Vtng1cN7d5j2BQKBQCAQCAQCgcCKsbAnmZ7krfZxbW2W6ExpXmCq6VNnXT4DmGooqKXQ0/F1ndyV3dDgBcBUa0ktljVTOjw8zCRM1fqznl4iXPvuVZn26albKXX2mWXbOp1O+i01NGqi6DFS123ap9B3WzML1rvdbmc0tlVN2fLeWQZqBqQJIYFZ+3sOnhqmnlo/1aBZJkq1u9T6lkmmvEhIVjW74ZiwCQ3v3r2b2CY6n7/11lvJmZoBTigrLi8vU11oTqHJicn+si00aIrVQGnS1CpQGcC+0rlpwzWrptGmFNjZ2cmwW54ZnCdTtTwsQ9U+KhPcIc/0xDJKGipczd4AP/CMlyZjVcFo7HOVFVDG1yanJPs0GAzSmCLjocED2Gacqx4jVTejYxl3lQ3q8G8ZTGW6bRt45ohlzAoXgU1gCszaj+zEeDzOBNAYDAaZZOXKVtkAFBpK3ZqVamAUPoNyZX9/PzFlVeG1n8of1l2ZOJaD+wWW8e7du5kARGrya8M/6zvtdRFLnbyxYO+xskSTk3vmznlJ31W+5aVFKCpLFZQxF9T1VOsGzKd/YRk5xpQttQmnLy8vV5a+gqi6byy67yb3b/r+MqZ2nom77kXzTPPy1s9l6x6MVCAQCAQCgUAgEAhUxMI+UnoytKEF1UnNJqck46H3E+q4qLbty9ovXlUX+1y1h9dEr6ynZdmOj4+TJtPaCW9ubiaNoGo68nwgvHItA++5qjGx2nOybsowUdPC/72EvHVrM+pg6LzEn7eBNfPspJU9oyaW7CYwnzQXmE+qrAkA9V4v8bMyNF7fVW0fZSqoEeJYou/BgwcP8NprrwFAuj569Cj5TdnwsoPBIGmMqbklC3VwcJC+o8Z2NBpl5JIyP4uGcLUsjWqMWW+dGzo/+G5gPnCL9bO5vLzM+Okpe0PUMW6Lb
MHzwg1rkAuFtqnHnnk27XXCY1AIy8oAszGmTLudZ2dnZ+lvy7ZpqHNNL5DXL8vWt8i3wqtfnp9vq9XKsEIee699WGdf6TrnMeAsq/XP29jYSPOIeweVeZx3lplS/yll5jXQk9b37OxsLoBDXfXVNVL3Mnw/WRuVB9xXsL9YLu0PL0VH3XPL0+ITyozzXhvshW2tocTtWlOWNVt2X1Q2eIWOf2vRorLAphPRtdeT2ZaRWpXfURmrg28KyjBTZX6fh0VDnBdhYdM+XSCtMNYCWhM/pbB5PyeZTjx12swzv1rlgYoDn2U8Pz9PZbORaJ48eTKX4wKYN9XiYdLLvWXrsqoDiYLvVhrbOsPrwm2j66gTZdly1yHsvQngTbo6I+3VCW+jzLbVg7sXCcoeoDSvTNFYssFAdGNlF7dFTPs8EyIexhkN65VXXsErr7wCYD5Pmd0wcFE6Pj6ey08FYC6aGjdKuinKM51b9CClGz+rVNHNg27CeL+N3qeBWwg10bEmqPour1zLIG+RUpmn9bYbN29u2fFa5n3LomgjqWNAcxsC8+a93GhrHik9nAPzuRF1TPE9ZTdqi8IzwbSHXm+s6tU+Q00sbZ/VbZqoSta8NcMrf6vVck3i+UwrBzWKnw1coUGsbOTA0WhUKc+cRd6hQ9vOkz8awACYD4xC6H5qFZu/sih6p9d33rixiuOrnlu1HIvea8uqezNLEOjabA/kajJWlAO1SHFVB6q26U3uh67CVWtH0ZwoUgaU+X1VhGlfIBAIBAKBQCAQCFTE0mmLLy8vM3mk9IRnmY6tra254BLAfPx6a8LEd+h11VCtH9+pzq/UJtPkYGNjI5NnhWZNGn5Ww+wuE+RgWah2yDq4evlErOmORx+vSlO2ahapLMtVB4qcaT2nfJ0L9jNP42nNejRsrn7nzVPv/zKgI3W3280EYSEjtbu7mz7T8Lc2PwzNXV6+fJkYKJubw8sWr/X0WINFgk0AWe2hZ5aickrNnfX/ZrM5lxePnxFF42JVY7+MKYjHZpSZ+1642uuEF5hBzausWabHZlimHpjJxjwHev3dKs2tvHmb1xeTySRjXVFU3lUxh94Y13lVxFJZtlnZbxuOW8Ofe6aA9jocDjNmnGVRtDZ55nAasMSyHqyX/a3ey3feBpRZL68aS973N1W/IoZb29+aMRJqBq3rhl2Tl7H+uAnchjJetQcswyytej0KRioQCAQCgUAgEAgEKqIWRsoL/Wk1TJpEkFoYq9lT3wNrn8p3rQL2ROv5BmiYad5HXxCtEx3hqeVqtVpJC6/BAGwyves8+XvaF8/unrDal+t2aLxJ7Xad9fPa3WOkPJ8P6/uj88vzi+DVY1PKlK8syLyur6+nv3klC7W1tZVxJj87O5sLZwzM5s7R0dFcAADAT7Srml7PF4z3LNqHeb9TDZkyU3lhfD0fCe2XIq39qlHETFVlgz2/qVXLB6/cHiOl88YyUoSmC+BzdVzZMeatTddhPVGmTfMc6fPuWVV5i3xjFmGpeLUsFcvf6XQyicbVx9Qm8FVLkzpQxLIU+RUqi1vmmd73y8y1Otb0OuZ6XuCY68JVzEfe957vIpBdkz3G9SbhyfjbUC6LunyeVlW3YKQCgUAgEAgEAoFAoCKWjtrnaVTH43HmdE7NUafTSX4V1ndBE2mpNslqelfNTKjGi0yThmZneViPzc3NVDb6ctAfSv0jqNGcTCZuhEP+f13Mi/eeIrv/m8Z1a0pW9b4iraXOAc+3x9POVmGdripHVZB92tzcTAwtr0wB0Gw25+YRMNUEk23inOH19PQ0zR8b0h3I+oS12+2MhrrORIhlWTwrn1RG5kUO9LTURfet0mdqFT4Lq2KmyrSVRtXTuWQjvakfrOffxv/tGCtbnjpQ55xetKzL1NFjvzyWylsXy2jztb8sS9Vut5OPHGWSRj21UWlXhapzva53VUVe2ao+07u/6hi6buuXq8pAWPngRcC8KlLlbcFtKktZXOWfeN11Wtq0D8huHDxTAjXx48HCUvy62SgK0VoX8ihNb3M6Ho/TwsvyUyhvbGxkspFzIwjMHJ3z6mfxTRzYN42yG82bNBO0Zcj7LG+ToH8X3a8ocgCuow0YYGJrayspFmwYX1VC8GB0fn4+lxWenwHzASWs6ama3eqByh6uyrZPHsqaPJRpz2/KfF7l3LiOxc2a9k0mk4yTtwZH8swz7UFKZXZRThiirrar2lar6rM6n1FknmMPVzpeyh6u8t6jJs56uOKzbzLwk+I2mLCXcerX+xd9z1Xvuur914EyezNttyKl3W08QH3TUcbc77oQpn2BQCAQCAQCgUAgUBFrt8VsKxAIBAKBQCAQCAS+KQhGKhAIBAKBQCAQCAQqIg5SgUAgEAgEAoFAIFARcZAKBAKBQCAQCAQCgYqIg1QgEAgEAoFAIBAIVEQcpAKBQCAQCAQCgUCgIuIgFQgEAoFAIBAIBAIVEQepQCAQCAQCgUAgEKiIOEgFAoFAIBAIBAKBQEW0ir78+OOPV5Ktd21trfZn/vCHPyz10Kp1YlkbjQaazSYApCu/m0wm6Pf7AICjoyMAwPHxMQDg9PQUk8kEANDr9QAA9+/fx4MHDwAA9+7dm/vu8vIyPesP//APK9Wp7nZdRbLmjz/+eOF+Klu/KuW+6pllnlW2Tj/60Y8qNWjRuxuNqQ5kbW0t3WevHsq2Ydn5VFQnLQ//5lyYTCbp7/F4PHfV7+zVQ6PRSO3hXTlf/9t/+29X1qlsH11XIvOi/qqjj5Z5fxlUbaeqcnyZ+VumbleVv8wz6libvPcsWreqv/PuX5XMu0lUnU9eu+hnnjzm32xnvXqf6fWq93i4DhlRBraMy8iVMnXy5pK3Nug6CkzXoNFoBABpHzYYDNJ3vK/Vmm6dO50OAKDb7aZ1hs/UNYt/e2NimT0RsUx7LruWee9eZtx5bcS25T653W7j/PwcAPDkyRMAwN7eXrrnvffeAwC8/fbbc2V89uwZnj9/DgAYDocApn3X7XYBzPpOy8JyFPVTMFKBQCAQCAQCgUAgUBGFjNSqUPUEvAoGq+w7eUJtNpsZjQPrMRwOcXp6CmDGSPE6HA7TaXd9fR0AcPfu3cRE3blzZ+6Zg8EgaearQtu1bJvZ+/R/209lGI9VoEr/16FBLvusm4SnyVx03NSBIk3sZDKZY5uAaVkvLi7S3wDm/uff3pizmlqdm9QU8n8gq2Vapm7XDavBXuYZdf/2JuRyFdzm+VsH2P5l+2jR9ijznn+K8NpWrVT4mcrlvDZcW1vL7C9UbnnrtH3nbemfMuVYZK9SN9RawWtDrlVci0ajUeY+rjeXl5eZfltbW0t9b3+n/ReYIm+/qXsctQpj/5AxJIPY6XQSU8g9t7KDdi/ijcWqfVN4kKpjgtZhZnUTG3c9QPGqmzMAiQI+OzvD4eEhAODg4AAAEu3YbDaxvb0NANjZ2QEA7O7u4v79+wBm1DBpxouLizQwloHXZkUCy242G41Gpv914FUxJ1sGdZrz3cSz6kKeSUir1cqYyN1EufLM91guzhWWcTQazS1Q
wGzBuri4yDXp07nJRazVauW2j52zVet106hjnHnjwqvfVeaTeShSxlT9LLAY8sar129FyonAYig66BDj8ThXVquZmVUGNZvNjAma/n1bD1Rlcd3jUPd2bGt9L/di7Cv+PxgMMm3M/Zs+QxXtdk28iT6qQ5lynfAOVHY9bzQa6T4eoLjn3tzcTP2yubkJYHbYury8zChwgeXHXZj2BQKBQCAQCAQCgUBFlDLtW+S0lmeSUnQi1pPnTcAzl1KTIX6mTBQAHB4eYn9/P/2t2NnZSWZ8r776KgDg0aNHyaSPp2OepgeDQTph142itqfGpMgRU7UrnulVHjW7CBZ1BC8yAdN+LXIEXrZcq4CdTxyX7XY7YxpntV+rhLKUNmiEsk+cM7wOh8Ok6bPfjUajXOdk1SIqg2LNbT3NbRncpMZuVf3Ffihikr35W5ahquI47z1D7y+LOtuqaiCHslg1S699Z81UVNNund+LTFq+KajD0mVRlJEtHgswHo8zsppXABnzZF7b7XaGpfLm320xG1vVfKqCq/YBwLQNPUbKmspzneIeTZ9Ptw3PtE/vK5KldaNI5pbZ/xTJheuUEzqevbnB/mG/nJycAJgGcGNQCjJSKvu4z6jTtC8YqUAgEAgEAoFAIBCoiFqDTXgaSQ9F/jW3QZOimgrVAFn2iOzT3t5e8o2iLebW1haAKSP1+uuvAwDeeOMNAFMfKb6LQSn4zNPT05UzUnoSt5oS1nc8Hqe/aW/qaZC9kNV1OMiXqUfeZ9ZWWa95YUiv0s7epMbWYxI4PrvdbhqXagdMrLrcykxaLauyTyybXvm3ZaQ8nx7VzhJeQAnr29hutxf2k6oTNzl+dI7m+bF5rIbKhjI+lyofLJvthajX71bdPkUaas+PSFk8i1WVtWzwCFt+z0KA33U6nTRnvDQDFrdhrtSNVVm6aF94Pkx2vHtBd4qYKfZbt9tNazA/a7VamXfq+LltDGNZy6RV7x2AWX+0Wq3Unh7Dxz7ifowBxRQazMDzy/X2XHqtGypLvTFpyzMej3PLaJ9LXPfY8vyiG41GmktkopQx3NjYADDbh7MPlZGy71gGwUgFAoFAIBAIBAKBQEXUwkh5kbJs8lovgaiXkNNqAq86uddxsvfKbxmpy8vLpEF/+fIlAKTEXk+fPk2nYtpmMkLfW2+9hW9961sAZj5SvV4vc4rWBL5Vo/aVtUm2ber5rrD9G41G0tawTrx6EQy9ctStdSnjg6UhTVl+ZdmsJtDzsyEW8d1YBdT/iGWkhvLOnTvJV49j6joZKW1PZaCA+bCk1Ajp1fpIaZ9Y+UGor6L2s2pv9ar+BatGkV9Qnm36dTDwWp4iTbDHVlXRpHoMk+dzWZQYs2547W3fz7GztraWy9ro74p8EFYFll/lm8ouywITm5ubSW5b/5zBYJCeoZpeLyllnXXwcBvkbFWopjxvvwPM+kmTiNp1SCObEuyT4XCYWYM7nU5mj/JNasPriiTnsXO6fijDR/BvzhdlpCxjRsZjPB5nWBNl+evomyJ2XK9F/nTeuMvzsdbf6jj3mK464a01tm2B2T6D+x/OrUajkfqFPlK0HtN9iu4t8sZI2bFZuMMo63zvOSnbzY4XytOjty3VreYoee9fFHaxV4c2u4EbDofJDE8PUMB8gAmGOn/rrbcAAN/+9rfTQeru3bsAphOTnc9DGQ9S5+fnczT/snVTeA751mGP9PVgMHA37cB0cJI61U3IqlBmHGpZdRMNzMZSv9+fq59Ccw+U3dRdlxmqZ9rHBZXjDQBevHgBYLYAXIeZjhe23+Z1OD8/T+OL136/nzlAeWYdVn602+1Ud47B9fX19LduNIB5E5hlUTTGdYNrryojrXzzzGvrBtuE7wOyZhzeoUlNkbzfVTlceeZ+1wHvcGgPUDTPaTQaacxyDGs/Wcf066iHnROe4/V4PE7zkPKN9d3Z2Un1I3jvyclJ+ptoNpsLO1wXlb/sfd+kwwCQ3UNwTKkCR83gOb64Rumh2AvIw/9tyObJZDIX6ECfdVvasKyCdxXl9dxFvP2erhO8VxWvwKwfjo+PM8/ink4PbNqnHpFQR50Ib/x5ZqAExxbH4XA4zLgGFJmZ6juvUw7aclxcXMzJMWBWp2azmTlIsX2UPCDqUJiHaV8gEAgEAoFAIBAIVMTSNi+qyfQCD3gnZntS1tOgl4CTqJMB8Jx2VZPMz3h6PTk5wd7eHgDgyZMnAJD+HwwGePDgAYBZQIkPP/wQAPDee+/h0aNHc3U7ODhILBZZBDJSw+Gw1hO+FwCEGotut5txtqQ24uzsLJWJ4Ml/e3s7MSE88Xe7XZdKXhRFfa3f8Z3Uzq2vr6f6sS7U0j5//jyximwLlr/X682Zt/A9eVr3VZsj2P85t1hGapnv3buXm+zvOhgz1Z5a8z2l3JWJ4v15TFSr1crUkxqmra2txIzys/X19XQfx4H2ZZX5VPZeK9darVZ6NxkgZWutJlADbdTJAHigmbG2RVGgBUJlsQ1OkSf3+X+ZwDarHp/6Th1rlA/sJ2qVlTmknGB/eUEEVl12vSpseOzhcJjGFy0c+P8rr7ySZBzHI+t0cHCQ0c56CUa9ct02XKc5s/ZNHtvR6XQy5ngasIprkq6f7E879gaDQaGpPuUOsSpT2bJtrOyLukYAxYFOrgu6znBO6Li3lhyU1WdnZ6n/eA+/G4/H7j7SppVZBjrubPvruFNLDX7G33JM8f+zs7NUB+7vlKW27h36rlVZGNi1Qy3ElC1kublP5f+dTiftEdgWup+nzFOWjWCdvIBXRQhGKhAIBAKBQCAQCAQqYmFGSk+N1sby7OwsaVx41VMuT//Uxqg9o7VZXWXYRevA5oVWpFZ9f38/MVH0jWLdtra2EhP13e9+FwDw0UcfAQDefvvtVCcyWIeHh3N/A/OhG72T8qLwHCupXVAtGOtLbdje3l6qJz/jSX4wGKR2Ubts1erou+3fddaJ71RGguV89uwZAODzzz8HMO03jlsyiLu7uwCm/l82TLCXGPYmNLI6x2x979+/n+aW1aaohn1VWlodE9Y3Sq/8Tn2qCBvavNfrJZlAzRIZ0O3t7Tl2Cpi2hcdEEavQflrto2oCKdfYV5PJJM1v1tv6pljUOc7u378PYN4awF6VBSxilj3/KRvoQO3Xtb+tT5xeq2oAy0CZXG1v9g/H1sOHD1OdOGZZVsp41fKzX6/DYd5jpqxvl/rQ0MKB5X7jjTfSfWTeKLsbjUaGkbq4uMj4vd6EzPMsRgjtV3sF/GAmdcILE22ZlrW1tSSTKKc2NjbSHGO7c23d2dlJ45I+2OzL8/Pz9FyV9Xm+KtcRXKdM4AP1E7NWExcXF9fCShWF8tY9Eee0+kISGtSA65hld3U9K0oDsQy8MW77vNfrZdbMXq+X2oGsE8ff5eVlkgccb/wfmDE6fKamBroO/3g+3/qFDgaDjF8/+6Db7abycg7q/PEsYZYNsFM444oWCs9kTDuXhWXHsZP6/X66j5107949ANNNrTXb6XQ6tTrNa1nzIu3oxoflfvr0aTpYkErkQHr11Ve
TKd/3v/99AFOTPmC6ePF+fRYPUmwfttcieW+KhJpufKxp371799KBgoss3318fJzq+8UXX6Rys8w2ipya0XjR7+pcjPUAbANEnJ+f4+uvvwYA/PznPwcAfPrppwCmbcwgIIygyEAgGxsbGefF0Wh0oyYIuomyG009SLG8NojLdWyAdMOcF4VPy66bdZunjHW6c+dOGo/2ur29nTmw6AHBjvvxeFxL8BaFt8nTjQ3bX4Nv5CmWrsMkSc2H2Ma8qvlRUcRDu3B7ZnM6Fig/2Qbn5+dzf+t3egivE5eXl5lDnkYj5QHztddeS/dQLrOM3Oi22+2Mc/8qzc7tAUrntDV7VbMimvax3u+88066n0ojjkHNQcfnc27Z8tRdR4WaKunG0DrN87vhcJjqYJU24/F4znSdV7vGL2P+7Jm82jHR6XQyQX82NzfT/oZ14px5+PBh2gfxHq63+/v7aXOr77OHSPblqkytrvrezrXxeDwXyGUVZbuqTEXQQ4EepKwim310fn6eCRaipn36XF7tuNM1uurYK2Puu76+ng5Q3Nttbm6m8rFu3JOOx+P0N2Uf/9fod0Sn08kEr9G6LSMj8n6r/aSBPCijOTd4z8bGRppD7FdVqNm9VNGYLFufMO0LBAKBQCAQCAQCgYoozQHnxVlX8yqeXnd2dlKABZ4Wqdk7OjrK5FLwwk574WrzylIFXjheL9Q5T+U0D3v69Gkyw7PmYR988AF+8IMfAAC+853vAJiZiwyHQxwcHAAAvvrqKwDA119/nTSH1HYoO1YHNW+ZFDVbYb3X19dTXeiQTu3s+vp6YjpY7l/96lcApgyV1dwq41U38hzkm81mKj81lE+fPsXPfvYzAMAvfvELALM2fu+99/Cbv/mbAIDf+q3fAjBjQ09OTpLppoah9cIPXzdU+8+rmutYh8rrZKSKcg6pdtnO4VarlQljTi3S3bt303jklYzU1tZWup9yQ+cvy6BBMOpmO9TJXNkna85G2Xd8fDxnuqtl7/V6qR08s8Q6QE1xq9XKaLG9wC0sR7fbzaQS8EIs2zYfjUYZpkADjti8H6rprQMek8uyaVAQMjSUeScnJ6ntKfsop9fX19P4JDzT2brnnBfkw1p0ADMNOcvNtWpvby+1ATXVrPfGxkYmYIiy8KsKp+2xbdbMfnNzM8lmDQYCTPuEVhI2JclkMkntQsZR19Q6AmioRtuyMfod209TVXAeUa7x/wcPHqTyKvsOTGXe/v4+gFn/at4iG8hBU8ysGpeXl3MBuYBZn0wmk1QX1leZn+sqn52bXioB9sNkMnGDTgHzTDvHGGW9Btjx8uPVsX/Q9TXPnHN9fT21OcfTnTt3MqanytRwr8s5RNnRbDZT/XSPr64DvI//ryJlgvaTBkWzAaw0wAStMPg7dUGw5VdrkkXLH4xUIBAIBAKBQCAQCFREZeqjKOwvGSnVKPPkzhNzv99PJ14yNfz/7OxszgnOvrMOzZiW2zJRGpqUGiAyUhoqlpqyb3/72wCAH/zgB/je974HAHj99dfn6vvs2TM8fvwYwIzZOTg4SFpZe6pfxEfKg9WYa7JjbX/2AbVILNc777yT+pN+RaqpJiPCdjo9PXUzYy+LooAVk8kklZf1+OSTT5K2kmDgj3/37/4d/s2/+TcAZlpo/m5/fz+THFmT83l9cp323taulxqijY2NNP9uwjlc3+VlHwemZbXfra+vJ60RNa+UGffu3cswUdQ2aXh7tZe2fjoa5rVuHym+E5j3D7IsNn31Dg4O5hKkArPx9/DhQzdpY51ji5pGTTpt/aGUGVOfNX7vJXm0a4GyJtZfQn3orJxYlR+iFxBDEzqzL6i51fupXWef5smhvDm3Kmbq4uIiPVsDM3EuEeqPyzpw7im7q+syMNVQW0aKWETjrO3jrUl8p2VvdnZ2kp8xKmMEUQAAIABJREFUAznxns8//zzJbbVE4DPZn8p45/l4L9NP4/E4o+nXkOXWD1L3SjbQ1tbWVvrMhq/W9A7eemvnoZdqYFW4vLxMdabcoyXIy5cvky8yfchpqdTtdm/cX8qzStJ9oWX1xuOxmyyZ33ljrE5GqsgXXn291NeYV45TygJlajiH7L5c/Zi5Rg+Hw9zURnX7whPqa6bBSshIsS/YBmq1Qmi6EY85DEYqEAgEAoFAIBAIBK4ZlRkpa9/sJaXrdrtJK0RtEu2yO51OOklSg0EG4euvv87YAvf7/Vp9PrxTKJ9PduPw8DBpcVmefr+fNEaM9PZrv/ZrAIBf//VfxzvvvAMAGc3R48ePU/3og3N8fJw0BNYHwWPKloFq/6wN8+npaSoHtRdkn95//328++67AGYaW+Ly8jKTfNQLZ6r9tagmM+95wFTLwDrpuKHGlf3zb//tvwUA/MEf/EHqJ97PPvnkk0/S32pH7GllbwOUycwL3XkdzJTHQllGygtTv7W1lbRcZHh53dnZSWyBMlHAtE/svFWfHNaZmsK6NYCEDUd8dHQ0N5b0+vLlyzRP3nzzTQAzVmAVYb8tvvzyy/R3kb+h9ZHShJU2ibpG9POin3rtbhko9ddaVfhz24fNZjPJLutrp36kXAuoyfSY10ajcW2af20zGzG11+vNhTrWcu/v7ydNs11zNjc3098eI+Whjvlk/dZGo1EaO5zvm5ubib3gesv6DgaDtBZ7bKg3b1fFGFo5oAntbWTBwWCQCb1M3L9/P/Ud90q6r7K+PC9evEhj1D7zuvyj+E6bOoUWOE+fPk3jkP7k6hvmlfM61tciZseL5uxF57RMt8dIec+vC3mh773y63psmZeLi4vctCWtVit9541b792LysOiMPVeePLRaJTOEewLTatko/UpI+U9n1i0/IUHqaKBoYugOnIB080sTQu4uXjllVcATDcQ7GC7+dGFWxdzawqyjBD0QjWyHKQ99/f300abwqrT6SRzHJrx/cZv/AaAqYkfFwB2Ls34Pv300yRYuKCNRqNM/gI106gqCL2Jbilr3XCzPQ8ODpL5EYUg2/3Ro0d4//33U/2AWchwNZVgn6vDsLeRr0OQWNOQ4XCYGUO7u7sp0Mdv//ZvAwB+//d/HwDw7rvvJvO9v/7rvwYA/MVf/AWA6aaXz6W5hW4ur3NxqgLd+NxEYAwNTWyDxrDtut1u2rSq+a8enIDZgX17ezttDHmA0vd4uYFsXibdeFYx7SsrWzwzJRv2nfXf2NjIOABr/fJCjCuW6UvOcc035F3tQcdzalZ5kpeTqt1uZ0wB9eDl5auqc355ckc3ony/lWEnJyeZnCSqKCgKWFAnikw7x+NxZtOu4X45ztjup6enKTAQZbyGvufmnfUGsvK7rvrmhXTXQEU6t7n2UmbzHi0r5xEPXePxeM7pHPCD0dgyVYE9vOnfGnCGn3FfcXR0lBR/vGq+L8pDa+Z49+5dN18R77MbyroVaFelwWE5KNt5EByPx2k88sB+Feoec96zdfx5e0rP3I9XWy7vWXmmsYpF6lcmTLeX17Xf72f26BynOoa9eltZ7R1q6oZNo6Prgwb+sDnwOF/UZJnzTPM42jWhjvrczt
1hIBAIBAKBQCAQCNxiLG3ap0EMNMGi1RBrQArN9A1gLgGapymt01RJT7ZKwwPzznY2yenu7m4KbU6TMTpQ7u7uptMxtb806/n888+TmSDbR5NCKhPF7+oK767lV3aFUIc9aivJID558iQxaUxoS1O/1157LWmaWH5g9SZlniaTdSKrsbW1lZg0JkfmOPvkk08SE/Xnf/7nAGYh3S8uLjIhWj3TvusM5EB44T853s7OzjKs3HWaH3pJL22SQ09jfu/evQwjpU7wHhMFTLWcVqvmOQHzOhgMMpqrOsH+6Ha7qR40jyX7tLa2lkmSqMF4Vs10qJmahiMHZmy8holXc448awAdk7a/NaADr+vr65nPdOyoHKkLqj21rBkwYwMon/f29lJbaYoIXq+LkVJYlnIymWQCqmxubiZTNzu+gPlQ6MCMMWg2m5kAI2traytLn5DHbgKzepJVOTw8nDONVWiCWsoVBnm6vLycSy0AzJs21e0ekBeM6OLiImPp8vLly7TOsk7smxcvXiSLD8pBlnsymczJUmDa99YcVpnKVa1Tduw3m820n6MbB8t9dHSU5g/HnA2MdBMoYpGUhbfmlBqsx8oC71mLJN0tgjInXiAMYD75uybn5hrI8acmyzZYHMuvQWzYj7rOW3im1FWQFxDGS4ukjBS/t+cKwE/YXWS6mVeWqxCMVCAQCAQCgUAgEAhUxMLhz71AApqYjZoWnoo1GSxPjBruGJhql6wmWk+/dWoxxuNxKhNP6Qz7eHx8nOrEMr733nuJ4WA4bdplAzOtJtkbXp8+fZq0v3ymamytT9iy9ppFPg2aMBmY+q1Rk0cneLJQBwcHScvBdqJGs9VqJc0ANRYa4nrVCRzVD4xjSZNsUvvFcv/DP/wDAOCXv/xlYqTYP9S4ewEO2u12Ict2XeyUF0aVdXv58mXSQFn/uOvQ+nG+qs+MOn0C84EllJHi38pEAVNNsjIHwLxPptXIa7JXe+33+7Un5AWyQTY2NzdTmTn+2P69Xm9Oo6e/n0wmmZQPdQcwUP+EvLmkDD1lhvpNaZhf3u+x3sB8QCENSqO+OcC8r05ZH4qqsJpj9blVn1VgKt84tlhG1k37V7XQq2Lh8ywLdLyQlVFWk2OPa9NkMskwb+yn8XicCYOv60/dDLfHRAHzvid85/HxcQrSxKBUxGAwmPNdBmZrmo7LVTGIXoAdG8hjNBplmPPT09PUFzbw0+HhYVpf6ePLdajZbGZYdfUn12ApxHUFQVHfWI49lrvf78/teVhu/m7V5QL8fWpRmga93/qqeSkidCzbUPh1y4eiVDCsx/n5uetTyPpR5lE+NxqNtE6z/5TJtelHut1uZo+q4dDrGHdFIeM13YgNMsH9Q7fbTd9RTqjVThlGqnKZl35CIBAIBAKBQCAQCPwTQyEjVRStRW3P1WYYmJ5kbbhjamKGw2Em6o36M3inxDqjhOg7WSYNBw5MNQs83ZKx+fDDD5OPlE26++LFC3z22WcAZj43jNr38uXLzMm51+tlolrVHRnJs2W2PgqvvvpqCgdOlo1asWfPnqXIi9Sssaz8PeBHXvKwaP28CFyejboymWqTzroAUxaK/UJQC/Pw4cNMxCvVrqwqfG4ZqD8K20ATEVO7aaPdXCcj1W63kzaWn2nCXctI6Wc2DHWz2cyEyOYYPD8/T/OU/Xx8fJz+ttGw+v1+0kbVCav1brVaGZ9PZWP4mUYdYvmUbVsFqK0fjUaFvmT6N7+z92sYXFteHaM2Qp/6QXnJfeuU8cp8sL1VC6mhwYGZ/N/f3091sv4Cm5ubrm/AKuRAkcwDZn3Asb6+vp6xoGCE2bOzs1Qn1pfjUtdbfuZp8OvSqnvyO6++OvZsu2uoad5vGWz9rm548lXXRMBPBq0hm20UtfPz8yTDaBlDH0sde9aHXMuhTONN+CBZ1qnT6VSOJLiqMOEK9e/XEPyE+ggB8+kCLGPNOmsUaPXdsfVfZg55PoVaF2A6jijP+O52u51JyKv++mSb+AxlQrmGKyNl52PRPqkOeIyUJtbVaH0so1rHAZjbA3iRCJctd2nTvrwB3mg0UsG0QnR6VSqOV7sAeyFmtZJ1Ti4dcDYUKRtf82Ax9PcHH3yQnClZTw7KL7/8MjnGMmeL5u6w7dPtducmIOtJVO1UbyGyFKvm6tCNDDd/7C8eEo+OjtJByppnjsfjjDnPVX20TJ0slPK1AuX8/DwdMmyOqePj48ykU3M+u9n1ynHdByhgfo4RZQ5SywYuKQPN78S2taHOt7a2SjmtqvmYDXHODcjZ2Vlm3mrYarYL768z2ERRW+qh3gY20A293Qh54evrBse4OkTbw5uauegG0N6nV1vuq3KMWJMKvdaZO0/L4OWB4yaC8o3zhfIcmHdYBqay24ZNvw4U5WrRsO0sE+cX040cHR2lNmD9WO/RaJRRCGgaC2KV4bS9/1kO729bnpsMWqDytUxOpLW1tUzuJw3lbpVGlG/b29upX3W988zReM91jVENMGCd/4H8/rmJoE1FKSs0OJpdR3XPwPutUkjN2vT59rNlcNWhEJg/MLBOqpjk96xHu91OhyQbyKrRaLg59+yea1UmpV5+Jx1rLAfLpgdA1pNziG2he6k6FWNh2hcIBAKBQCAQCAQCFVE52IQHSy23Wq3csKBKeVsHZtWYrgpKpVM7xHLwpLq9vZ1CGDPk9xtvvJE07Db4wqeffpocY8l+KHVqTeo0rPaqAjMQ2rZWq6waGWq82AZbW1sZZ0s1lyrKdL1qXOWEqKwjMG+eY8PI8tpqtVJdtM1WHdK9DLxgE6zbixcvbtS0jxorL+S1fmfHu4ZxtuVWh3obrlsdtjl/T09PM0Em1GSjSkLeZdoszxxRwxGrtpLXVTNSyq6USepIeJpGrYfnvM17qjC5dZs163M9poDjjvOG6Pf7qSyWkep0Om4yzlXLBc9si2A9zs7OMswSWUgNSsDxSGZKy1/ESN00blL2evDcDqzMbTabGfPWTqeTZKImCAWm/Uq5xr5W5tEGVVKW2wu7fZ3QYAP2/Xl9d13BJjxou1kTy7W1WcJ3y0htbGyk/rLm29of+vs80z4NirJInexvdU/tMYQ2aJAGFrKJq7U/bUCTdrudCeRRt2mf3d8p26frpk0ETauXRqORsUrStvAC2S1b7tslMQOBQCAQCAQCgUDgG4ClGanLy0tXA+KFqgbmtYQ2FObFxUXmtFu3NkoZKdVCALOT7SuvvIK3334bwCzJ3M7OTiZs7ueffw4A+OKLL1JoWWqa+Ez1HVFn+jytX131LQpvrJpMaicti5CnYQHmHXuLtNB1w9MIe9o41lPDTwN+iG7Vwty0Zi8PnmZdQ/dT+3IT4c9V2+r5N7JclqW5vLxM49DO+YuLi4xvlDJT1g+q3+9ngid4jM8qocFWrAOzagSJq0Kd1zmXtD/yQsuqv4fH+BaV0V49tkrnqpUr+lmdyPMpsD5zqsG1fqSEF6L7OuH5nahfhE3YTS2zBv6wTL06WZfxa1kGdTh03xYUhYXXfrBWORqQR304gXkm0KYdUZ8n9qEGa
GE5VhWspmwY/G9K/6pFjvoRAdO2tvsHyoI7d+5kgtdo2gAr13TM17GnKPKRUhlm2ZuicjSbzbk9n37XaDQye/Vms5kpx6rWV89HSq1YuJeziXgvLy8zftQ6b2x8An3+omO48CBV9aFqOpbnhOkVXt+3alMqm9sBmG20GVHstddeS1nGNUM8gy48efIEwCywxNOnTxMtzzpphDubDds7RK3aoVc3szrpKKxZfpZRozFayj5v4qzCsXSRzZyN5OQ5vBPaJlUn03WaMtrDieYm0cMJcL0HKW/TbQX5cDjMbP7Oz89TnewCNBqNMrmiWEfNC6VzOS8fSN3Z5YvgLaT6f9FvVglreqN/qzmllUtXBfopmu9lDlLa/9exGGv5AGQOVHltkPccD9cZmEHNdWwOGz0I2gOg54RetZ7fNKyiXp55lncg1c2ojXyrMsweWHTd5VjVvrRmhbZsdcN75m0fL0UH+PF4nIkMqaZ99iC1sbGR6SPNuaTPte9etTJP61gUBdYjOIr2pXYv4eWZqxtFe1eu85PJJBPwg+4paiZLpSt/rya3RSbMVesWpn2BQCAQCAQCgUAgUBG1BJvwTm/W8drTXNyENsOjBqlVYEbxR48ezTnrAlOKkCZ9zEXE/EQnJydzMfv1mRrqfFUBJvR5nsM1MH/69lgYtouGA7XmIkXa3bLlK4tFNcBF5kueNlefU0ULcd1j175PzXqsZv0mtMteEBlPo6paVMtQqDkeNbVe3iNrOqfh0otCcl83botpqGomi+ZGmc+IskErtB/yTAGvu488ho7Is6S4LZp3rxxeKHoN/GR/WxSu+LbUsyqus9zKSliGQs2i7Pqpn9kcRcoq2meqOVWRWeyq2qDIpFU/u61jx2M4gHk3AA2PbYMTcf9GxkOhLgKWfVq16bJi0SAWau1SxNBcx57CjjP93wbJAGZzx86li4uLZNJn3R7a7bZr2rcsgpEKBAKBQCAQCAQCgYqohZEqgzrDIi4Dz8mOvlG7u7vpf35HH42zs7PEQDGwBDNIq5OyDYvphX5WXKXlqYqiNrKaB2URLNR53rPdvg3ap7x2ytOeFLFOV7X5TdbXe7dqZqw2+rqZKGDe3txqTTW4ipbNskjKaFmfJ2WrPD+oVYcQz8NNMF5V+9djIMoyTGWYmUXl2k0zdkV9d1ssKIrK4DF6VgbYRN56T1n/wbrkym1ovzrhtXuRz6Hnj0KZ12q13DQCgG9lYd+vWCUzVTRe8qxhbgJlWDQvMNXa2louI0UrI4UN1AD4PlJ5YdBXgarPXsQPdpXw9m3W7xpAJt0K+0n9Dbl/5+81NdNVsrQKgpEKBAKBQCAQCAQCgYooZKQW9YnJ++2yqOMUT81Ot9tNNq9kpLa3t9N3NlTsixcvUgJeRu/T8M3WTlPDRV5XVJ0ieBG7VNNVVK6y42DV2otVjL3bzEQp8uy8NdnrTUAZTcssqRbVll9ZJMtMXVxcuMmj7e/KaOTrxE22M7FIvRZlvdfW1kqxRlXLdBva0cNtLddV8Jh29fXKmxPf1PreJuSxrFfNHc9XsUz6jTIRWVfZr0VMj1eOm14/r4ra5/WDtdLJS4kAwE3uStw0414VN9FXZceR3QcAMyaK+3j2gfpRq+8bMN2X27RLdcyXyqZ93/RFU8N78+DEKzvm8vIyhU/k9eDgIJny2XDT7XZ7LvOzXqseoq6zvbwN7jLvX5WpQZ1BIG5zQIkq8A4PNwE9BHGse+YS3oYjb+OgzvOeuUtVGv4292NZLFOHRcfHqkyMAz5WpQSIvqgXRQEWFjUX1YASlKNXybnbYDoH3C6TPg9FfeTlXLKKOi9/JqGmmnZjrgep2xD86JsGbU9rGqmuOdy3s5/6/X66zwa+UtM+xbL9EqZ9gUAgEAgEAoFAIFARa3FCDgQCgUAgEAgEAoFqCEYqEAgEAoFAIBAIBCoiDlKBQCAQCAQCgUAgUBFxkAoEAoFAIBAIBAKBioiDVCAQCAQCgUAgEAhURBykAoFAIBAIBAKBQKAi4iAVCAQCgUAgEAgEAhURB6lAIBAIBAKBQCAQqIhW0Zcff/zxlUmmrspibfNUXV5eZjK4MyPx2tqam4W6TFbojz/+uFQ67R/96EeZh9jnTyaTTCbl8Xicsl975WG5bZ2azWbKpMzPGo3GXJ31qvjP//k/l6oT+2ltbS29i1fW4+TkBEdHR3PlePToEd566y0ASFminzx5AgD45JNPcHx8DAC4c+cOAODBgwcAgI2NjZS1ezAYAJhlBr+qTj/84Q9L1enHP/7xJTDLHK7vOj8/BzDtp42NDQDAzs4OgGn28ZOTEwDAixcv5n63vr6Ora2tufoy8/VwOExtpX2YN76175cZe95zvUzx7E9mV2e5RqMRzs7OUh0UnU4H6+vrAGbtqFnC7RjXupatk46968Kiue/K1In18d7BOuqc1izpbE/OBY6t8XicaWP2R9EY87DIuNM6WdmlMk//5tWOkaK2p1wB4Mo8KxutDASAH/3oRysZd2XGzKqeVVbmFckHb60pU6ar7l103patk449jisrf8bjcfqb96ytraU5QplHmd3r9dDpdADMzyO+x87Di4uL9Def743nsvOJa5O2nW1HHe9F9ymK5qZ3r7e3sijbT//1v/7Xy7xyXzWHvX2cXvVvLaPurfi/yh79TvvwP/2n/3RlnTiXPJmn79B9GjAda1wzdbyxflxjuZc6ODgAAOzv76fPKPc7nU7aO927dw8AsL29DQDY2tpKY/eP/uiPKs2lIiwynxddT4veVXbc/Zf/8l/SuCPseFf54M1ptjf/H4/HmXGkY9OuTe12O33GPlG5wvt+/OMf59YpGKlAIBAIBAKBQCAQqIhCRkpPnHmn1jztltVKqFaAf/MEqdoNe1qson0rAz35euwXMK+p4ElYv7faLWVj7Hs87XWr1cpoprW9qmoVrtKeszwsr7IwvI9aGDI8jUYjMTnUCLIt8rRtVbSiV0E1Cnw/r9QK9fv9dB/LvbW1lerc7/cBIDE2p6enGe0mNZuqfVet6HUxLZ6mjtB5oQwdMG0L9if7i/d2Op2MhkW1O3WgavuU0chW/W5VsJpM1Xp780vZKWBeTljNGL9rt9uV5tKyyJN53mcqq62Gz/udJ7P1WWwfq3Gsysp59chDne14k2PwOp/lMeKLQtd/O58oh0ajUZJhytyqHAZm8h9AhqHn/8pI5Y1DW8dl2thjonj1mJqifU1eWfPYYjuPyrDGefA08LasVzFSRJl2V9ZA2YY8Vn80GlVatzwZ5jF8dh/W7XbT3oB7Cn53cXGR1lha6ygjxf0Gx2Kv10ts1ubmJoCZdQ8//6cO3c/YNdXrJ68P7Wc6juwcUUZKrXRUtijsmp5bj1J3wadmLbxDlZ2UKgDswUTvUwG56OarqB7eYcATALoZ0kkNzJtS2cOVCgTWSTez2omspy3XMvBMDPkuCoN+v58+40GKE77T6aQ6eaZj9nDINqkLemhjHVhGluvk5CSZ8VH43b17N9HnLBMp95cvX6by2oNjt9tNz2U99SClm2h+t4qNVd4G
1R4mdaGhqSP7lUK63W5nNhq6KBeZqNaJq0xbrNlbXZucZZG38dM62A2dfq/jySpbeP9VJkDeprZOUwzdLLF+rJOOf1t3HUce7OKm5S5zmFsVisyx61DaXaeJK1GkSLuNsGukHqR0s6xzRP8HsvNON2R2rnlmhWX2M3koau8iJZy3npQZe1cdpLy6Va2XtrV3gOK1qLyE7u/shrbINEvHgXf1FNZlYOU30Wg00rihQnV9fT3tgezmejgcpr3E3t4eAOD58+cApnsLvofuA71eLx2c7t69O/dds9lcuD7/P6HoQM72bLVa7pzWPaJC9+p2juhBSpWZRUqIMrI0TPsCgUAgEAgEAoFAoCJKM1JEkTZRT3U8AVrmYn19fc5RHpgFD+j3+5mTZKvVyjiUqjZpUc2bal5sGfU71QRZDQoZgMFgkGhd1bjwd1Ybo5+xbtpeZelEC6WxPUaKYLlPT0/TqZ4sBjUnvV4vUxf+Ts10VHvt0aiLQrVr/JusE7U6p6enGQ3R3bt3sbu7CwB45ZVXAMwYqcPDQxweHs7VVx1L1VmR1zyt9TJjrwheHzabzaQxs4xUv99PrBw/U0aKbUYMh0OXLVgFqpqr3hYmypbBMug6Jjm/ut1uZk5oH1nNGKHmPlazVhe855Y1LcrTeut3RczmMibLyyJvbcozxyoyrVRW4CbZnlW/e5XzriiQANcWtWywpv+UZbr+UC7yqr9TlsiyrV4Ah6r18KAMrh1XzWYzM9d1/cwz+/OY6iJZoetWWaiLgZbJKw9QTkZ4rBOtPUajUabPh8Nh+t4GsdJnlYHK7jyTQF1X1UJFg0sAs/3p8fFx2mc8ffoUwGzf0e/3k3WL7qXu37+f/tbvxuNx7VY8FlXX3KLPqjyzCjzTeO+M4bn8eMy2/gZAZk+nz1cLsWCkAoFAIBAIBAKBQOCaUchIlfGd8LR4yt5Yv5ytra1kN0pbVF5PT0+Tdp2aiYuLi3TytDbTy8I6t6m9rLW9Vrtjnm416AG1Fsqusd62LYCs9qYu2BO1F+CC5T47O0v9Qs2MteUFZnWir9RkMnFZvLrrAky1DCyvBpQApm28v78PAOm6vr6O1157DQDw6quvAgDefPNNAFN75q+//hrATJPEZ+3s7CStlNrrW+2mak3q0N56/krWb06ZJX5GZ1cNa2/7ZGNjI9VJ/dvyAgbUxRoU+Z4UhTnVcuWxm1f5W60CHhtjfRxbrVZqa84lftfv9zN+hp5fqMceE8v4sWnb2Xd5KRm8dBSew7b1f1Am12Or6gxiUARPzmo5rM8qr+pHmmdjr1jlWKyLGSqSU9cxf4rYDPUh5HrJ+TGZTNI8sj6y2odcp6npX1tbyzAXXvjkOqwlPH9Tzwnek3+eNUwRW+VZRNj3LDNmvHcRnlzWOe/5OgHTvY9nvcOr/UzXevssL7hGEYraRNdVjhvuLTY2NjJBrbgn3dvbS/sHXrkOt1qttEYzFcvDhw9TyhjueYnz8/PKPlJlmaKqvp95lhdXPb/onrLQ/amXMoPlsYHSdI208mQwGGR8Fr3gE3kBKbQuZesUjFQgEAgEAoFAIBAIVERpH6k8+3DVYHjshGVoDg8P04mdPixkQTY2NpJm4OXLl+l3eezNIhp09Veyp1ai2WwmrYT60NhQ5cpMsZ6np6dz17Ozs/Sdx7JZeJrbsvB+q5o7G3r55OQkacr5Hfvi7t27qT81fDh/b7W6yjRYbdAimgo+z9NWaoJghh9lIuHPPvssjas33ngDAPCtb30LwFSj9OzZMwAzBovPunPnTtJOeZGCvMRudULZTquR7PV6mYiFHFMvXrxIfl/sO/UlY520HlYTk2ebvCz0eZZl0bFqNZqeP4rHnHjvqavMnsZOtdq8T9MF6N/ArM7n5+dJc2nZbL3PS9JbR4Q7lc95iQc1wqMmO/XmN8uRF6pYQ/J74Yu9JL+r8M3xIpsByNST86XRaGTKbesI1B9hdVl4LLaGA/ciV+n1OqDvsuu5ynjKtfF4nGGkOL90/llGqtFoZCxAxuNxmm+e/0/Vsad+F3ZM67ix33lrvsoKOyd1bipryt+VifxXFl7ZPKbZY5/IKKmFDjDvO+5drd+URjctYoLLwGMIPRaTY4pWUd1uN/2G+x5arzx+/BhffPHF3Ge89/79+2lf++jRo3SljxTfqb70bLeqdSqyTlCrA09O2Xa5uLiuF4/jAAAgAElEQVTIfKby2Y4tZU7LMF9XQdcjz0KAz7V7BU2iS3jhzz15XoWBK3vGqBxswnupHaDr6+tpgNoB9OzZs0SLcvPLje6jR4/SgGYjnZyczAnXvDKUhVL73sBhfTzTHetMqIsVhQHLSjr4+Pg4/c2DyPn5ecbEpw563nuOHni4MLG+p6enaYPH+xk6/N69e2mDwQnPegwGA3ey2jIsA50kHDtsPwqse/fupcPSl19+CQD4/PPP8bOf/QwA8O1vfxsA8L3vfQ8A8NFHH6X7/vEf/xHAjKLf2dlJpoA8tAwGg8xiqPWuYyNS1O8q8NkXNrP63t5e6kO2Cw+H29vbc/MImBcydSNPoHpj4+LiIuNYrGYddnypqW1e8Jm662IXHxXOarbH7zi/aC6q84f9xas6xnoLSJ118vL02cOEHtZ51XFnN7XAbCGy/Xh+fp5RoPX7/YxzuZeJfhl4ZlZWvqqZLNcayvXJZJLmCftaNz42h9tNHaSsAlAVDzZNwng8Tu1uTeSLgmvkoapsLzIr0vlkc/4Nh8PMmGN/6eZP8/8A07pZs0wvmIWXJ7IstPw2ZYYeJqxZmyrmvFDceXNTcxvpfLR97R2yysI7ANp56gWD6Pf7mUOSzn37ncp6PsszlSR0A2/nXxG8frVjZWNjI40pPYhzn0FlK/cMn332Wdqzso5ca+/du4fXX38dwMyVQPezti3VHaQsVJljFY2q5Lch3dWtwwY+u7y8zA05P5lMMiSJKtw8JWdV6EHKBo6xByot/3A4zLj6eMFlitYaLwAFn6UmhGWCv4VpXyAQCAQCgUAgEAhUROERv0hjpVo/q01cX19PzAYd/x8+fAgA+OUvf4n/+3//LwDgF7/4BQAkc6u3334bb731FgA/AzS1Gcto1KkF0frYMJfn5+cZDZY6uKoWF5hqONhW1vTq5OQkMQbURh8fHyfNGzU1enKuw7TPPkM1CcTZ2VkyobQBHXZ3d+c0gKwLy2w1oKr18AIoLBqOtdlspr5gWTm27t+/n8bL48ePAQCffPJJYqTeeecdAMAHH3wAAHj//ffx1VdfAUCi6Dn2dnZ2MoE2NjY2Ms6vGpZ9maAntn+UFbXhftfX19OYY1tQW/b8+fPUP9bZdXt7O8N8qonVqsJtW3gOnhqAgfND2R2WzYap7/V6ruN1XQ6wRU676vDK9mS/nJ2dpfsouziOLi4ukvklTVGV+bDmdmqGe5WjcJU66Zi1msxOp5NxvN7a2kp/s/1VC2gdgDUojWXhT09PU3/zPtX+1cmSqsbRjvVOp5P6hY7gnDenp6epn9SiAJi2F9tHtb91mF6Wgcf2amAMKzPYb7rWECqzrYb
ae+eqyq3jh3JN10XLPHNeDQYDNyAPMJUP/Fvbx2NtFq2jaumtybfCslW6r7DhvRWWOcljizkeedUUGVVTqHCca1JcW1ZlmHSvZINRKQtlmTpdR61plTJpKgv5nYa4rwIv6S4w70rCe/r9fpIB3FN89tlnAKbM1IsXLwDM+oZ7htdffx1vv/02gJlLAc36gHm2Hph3+SgLax0D+EGDWE/WTVNzWJcbZVVt36oLh+55rVltnvl3GajliWVi9WrTEV1cXMyx0HwGkWdWq+NOr9aEW1OYBCMVCAQCgUAgEAgEAitAaaNTzwkbmE+URu2j2i/S/4mswEcffYS/+7u/AwD85V/+JYAZO/Dzn/88aUZ4qtegB9ZG1rOpvQqa4M074QPzp3qyMtvb27h37x6AmaaB/29vb2d8wlQLYu2be71eqqf6TQHzDpeLwHOyZHn4ftW+kOWhNon37O7uJuaHz1QtM5/PdlK/jiItXVlowkWrKSEb8/Dhw8R4fve73wUwZTn/+q//GgDwv//3/wYA/Nqv/RoA4Pd///fxgx/8AAASK/qTn/wEwFTbRA21hkK1iZZVi1Gnj5SC/UPNGccgMNMcakJAamqpHWM9tra2kgZNGam8wBl1+Xt4dbJJnY+OjpLmj2Xk+JpMJmkcsm5k2dR/sUhTtGxdvFDhWhfra3F8fJzqRhmgIW9ZVzKg/P/8/DzVg3UejUaZoAHLQFkZqwX2NHWeM7NNgKqy2Grx8tgHqx3U75ZhciwDrnLdMhdbW1spGA3nCaEJvtlPlIvKdHh+knkWG8vUx4O2LcegJgPlmkTZsb+/nxzjeQ/Xr16vl+pX5MBeB7xn6tizjJTOC441rpWaUsRqxzVYgLLfdl1eJsAO58BkMskwJ6pVt5r1drud1jC+Xy1f8thBDezi+cV5zvPW+uQqkClXX0ab0sVjn87OzjL+T1qnvDQCKl+1T2zwEGXJOX6rwHumF+qc7Xt0dJSsVj799FMAU79rYLrWsvy7u7sAZvvU9957L1nAUL50u920prG9+P/JyUlqt7JQ6xLrI6hrFec+9wObm5upnuwPyrnBYJDqxL0Fr8PhcC6VCsHxbK0aqviwEeqbZNcYDfRm/bImk0km3UhReiQveIn6KXssFTBv1VCEyjUvorDZAU+ePEmDkRuG3/7t3wYwPUh99NFHAGZBAP78z/8cwDQAAIUlnfqGw2HakFjhsIgQVGFlHSc9up2dq+aKXKy4ED948GDuUAXMR4KyG2PtTOvk1mw25yJ6VUVeRJJms5lxHB8Oh6l/OLFYjvv376cFl59p9D4OQjV38SJ7LQpdINV8CpgJ/b29veTg+f777wMAfv3Xfz2Z9v2f//N/AAB/9md/BgD4/ve/nwJQ/PN//s8BTE0BgWnQCTqVsi/v3r2bBIgNiDCZTGoxjfMiHHqCxDrAMkjG0dFRGoc0n+X/vV4v4zSvC/aqDlCeM7wNWPLixYt0GHz+/DkAzAU+ofkVy69tUnajuQj09/agyf8nk8mcGSIwHZNUSvAz1qHdbqf5pQdgYDqX1HQG8Dfty0BNnPI2X2qircomKyN18dL2sL/zIvMRXrSnOsagtxjaiG+7u7spqhYP5+y3w8PDJAOo3OOzHj58mGvSAviBkFZh5re2tpaZ04PBIM15KpZY7r29vVQXbvS4RqmSSg/bqwh0oibLXhAXa06mprL2IHV+fp7ZoKuJn43wpYcaz7Svan05B4oOhxroRANQFEWxywtAZevHNskrd9WcS8Bsv6VBEKwp7unpqRs8wrpBeFEKrUJGFbtqwq6HHGA+IIzmtrwKekjT5wPzLhlWSfz06dN0gOKVa+35+XmSGdx3cD/x3nvvpSATnF+DwSC1nQ02dHZ2Vnmfp3sQq8hW8zarVNHAYew3ts/R0VHGdYNr0/n5eZp7rJM+35r9VT28K7yDtR6ArVk1MBtvuk9iGYmi9ccLOOO5p5Q5SIVpXyAQCAQCgUAgEAhURCEjVaRFVHMUngR5/+HhIX75y18CQDKz+tu//VsAwB/8wR/gX//rfw1gxgpQQ/jTn/40sQkaBIGnep7CF6EQCdVIqaYW8B0Blf7jCZmaEZ74Hz58mGED+N3W1lbGSXI8Hme0m2ouUGeIWXXktE6pwKydaV7F0/fdu3fnmA1gxhicnJwkbYqXX8A6Bi4SbIJot9tJ40CNJMvx+PHjRK3TdPS3fuu38Dd/8zcAgP/5P/8nAOB//a//BWA63v7jf/yPAIB/9s/+GQDgH/7hHwBMWRFqnkjNb25uZoIdqLZtmZDN1gxJqXqrQWs2m6nuZDRoejQajRKFz3lEJvHy8jKjQVQHUo8ZqEOLXuRMTq3X4eFhYqKoDWW/qhO3ZaYA38G2buh49RzVWR7Wa29vL9WHc4qBUDRMP8cYGfujoyOXGS/SoFeFMuyUZ1Yrd3x8nNEseg7tvCpTbOe7xzCoxt3TXC+TL8YLbsM6sozUqO7u7iazHK4j1BJ/8cUXKQAS+4nWEA8fPsy0QavVyg1nXTcb5eVhUjaJdaLDO7Xpv/jFL9LfH374IYCZfBsOh2nMahCdvDG2SJ2KAp2o9teGK9aAEhyPypDkBZ5Sc0s1t7PmR8vIDu1/y06qrLDmg15YZjWjKwoVnjfO+C69qplUWWhYbxs0gnshZZ+UPcvT2GvIchugy8vhtLW1Nfe3Xjc3NyuZ9imDZ9dwXrVuZGE+//zzFFyCwSa4LvV6vZQihRYw3/nOdwBMGSnOQY7bly9fzrHdwEzW9Pv9ygF27PjQd7FOms+Qe9BHjx6lNmDfcjw9ffo0lYP1ZFucnJxkUuZsbm6m39q0R9y3VIG3RywKhMR36F7F7mtVBhStlyy/l8+r6jobjFQgEAgEAoFAIBAIVERpasdqmXli6/V6yW6UrAxPwsDMmZ/+Kj/5yU/w7//9vwcA/If/8B8AzOxMv/vd76bTJR38Dg8PM86jqk2tenJUO05r28+T/uXl5ZxTPDA9yWuSLgBzTn1kb6jtU4aK2lBqW7QMnj3xohp2TTLsOdfZsJiafI5+R9TQPHz4MNWJWllqrTxHyVarVYtjPMF2aTabaUyw/ag5ef78edIeURP74Ycf4l/9q38FYMaCknX6H//jf+A3f/M3Acw0Sr/zO78DYBqWn5pb+kmonxjLoKxBHVp0Qpk9azs+mUySRot9QAax3W4nJop222ynw8PDjHZRQ4vXndDW1knnrecDYR1x+V2n03GdyIFpP1jmRLVTdSW25nOBbKAFnb9s14ODg9Q31OixXjs7O0kesI/ot7K3t5dhalQDXYevlKZw8MLB8uqNZ6vt9hKC2iA2XkLhonfXxe7atanVaqV5S9Z2Z2cnlZdyhH3xs5/9LAWhoRzUVAiUg5Sfnv183UEmPM2qTZzZ7XaTppxr71/8xV8AAP7+7/8+yXiuTZQXjx8/zvgFL2Pt4cFLJ2D/18+VsbGhmj1GyrKbQJZtUmakDh8pDU9urQc0XQD/VhbXtq8GAbJJib3UASorrU+VWjdUHYfqF2N9xlU+2TmsrK9l5brdboYFUoaJf2u6CP
0emA8MoalwroL2kWUz2PfD4TCtoxrqnPKA33F8PHjwYG6vCsz2EY8ePUrP557x8PAw7au4fmvC6ar7JS+FCaEBpLxATRoKHZhZTbRarfQslk39uaxffb/fd+UssJiPlAbxsn79us+3AUPa7fbcGUTL6AUB8pgv9dezfolV90bBSAUCgcD/Y+/bfiS7rvJX3auru6e7p+fimbHjsT0JjpM4JCYJN3GJQII/gRfeEOIB3iOh2OYBXhD/BwjBQ14iRSAkoghEMAhydYIz8dhz7Zm+1726fg+lb9d31lnn1DlVp6rb/q3v5XRXnTpnX9fee12+5XA4HA6Hw5ETmdVP+rTIJzicBBEHcOfOnXBif+mll0RE5Jvf/KaITGKmoPGHleqrX/2qiEwY/XDyhL9puVyO+XVa9LpZYWmHLG0V6gttDMdN6Ziter0eTv+wYED7d+3atVjc1ObmZkyLa/lRLwKt9UWdRSRC1a4pxaGlvXnzZugDWByBdrsd+oQtR9onnLUAefuJNV86Ng3lf/r0abAigZHqC1/4gnzlK18REZF33nlHRET+4R/+QUQm7H2IlwLDzmc+8xkREXn99ddDvAC0SI8fPw6aMYxx9NsiiZNF4uPW0qIzRS40hoiNgrZwd3c3zDtYPdBenU4nWH1w/3g8Xoj6N0t9LC2QRa2tE0+iPOvr62EeYQxaqQZ4nCWlDMjbR6wN01Yg1rRq2tzj4+NgkYJ8e/nll0VkIgMwdmENgAXh/v37MXnCtME6DmkeyxRb3rMk27RiM7Smzorls5KFctJEHZ+yCGtkWr9yGTF/eR6jvaGF/vGPfywik/QbiEHU2vLd3d1g1dLWaS7PshLy8nu0Z8bm5maYL+hfWNbu3bsXrPVIRQIPibt374Y+Zy1zEqX7IuBkq1r+WJ4lSax3ItF4Im2ZOjs7i1mdrPjdRSxSlreHFTuOMYcxxHE+Ogby7OwsFqsNi9TR0VGMmvr4+Dh8ry3686RQYcpry8onEo0J4xgkHf/EViQd68TzKi0OSjPssQU8C3i91nstZpmGzIYH1L1798JeCP2LufXyyy8HpmnERiEOdmNjI/QbrE9PnjwJz2K6cSCvLOc9XVpcqGa/Y9ZKzCX2dNDx2pxEGeOarZ5aLizi2cJ1YtZXLg8/m71D9P7BqlOahxbATNlWKqQslsPUg5RlltcHjJOTkzChmfYXLlS4fvnLXxYRkW984xvy7W9/W0QkBPZi4j548CAcvCCEOA8AsIgbCJuHdZ4entDatMwuRTrvDZvDNSHCwcFBJO+RyGRTqN1EWEAVmZ+IDySahp3zWeHwgLqhnCJTQcL5pzgzNsqtF7BFDoS8+dPlxobm8PAwHH7effddEZlsFnCo+q3f+i0RmRKe/PSnP5VvfetbIiIhn9Trr78uIpNx+t5774mIBKKUx48fRyhERaLU8fPS1FsbFLQZb9KBk5OTUE8Iadx/7dq1SM41kejmHmOUSQaKdO3Lstnid/JhHgd0Lai2trYCjTMOvDiAbG5uhvHAbkBJ5B1FQC9M7C6C+pyenoY5hA06rjdu3AjKFNSZXYCZxEUkuolEffDueepluVhi7GIen5ychHKwOzO7ovCz2IVEu+1sbm5G3HVEosQtOr8Op4hYBHo8cw4/VkqgfpjnOHQ8fPgwtC/mO1wx+TAMsJvLKg5QuOJvtB/nWgSJCeomMj3Qoy6cMwf9qg/s1rsXAR9YrPxBSa53InEX/H6/bx76RSayQNPy63Loa175h3exgkhfLRIUfq8+VNZqtRjxFLt4a8WElaeK3bitAPo0sKJE5wni/tLv5PxOWmnBhyVLHuA+VsToTXGWnIEWuA35sCoyPZDv7e0FGQ3l1+PHj0PboaxQRLz66qvBQIA9BuT52dlZkCtYq/f29iL5ArkMvF/KCh4zWpnCyg9rvqAdtVsw58PUFPs8xtLctheRgeyuiDLxZ7jqdZ3XDD1OWdmEKx/SrNRAmlzOyneWBnftczgcDofD4XA4HI6cyKQGLJVKMU0RTmydTidoYkEZ+/Dhw6DdRDD/7/3e74nI5FSPz77zne+E+/kqMqURZ8pLyxUk7ymY3fmSTpqXL18O1iN20YN7lXYBOzw8jBBViEw1ZQjqE4la83A/NDWc6DKvdjatXSwXOR0YKzLVQqOO/X4/aGTgXoX7B4NBTFO9trZWKHkBl18nZYVrytbWVnDFgTXpJz/5ifzyL/+yiEzp9UG3f//+/eDuBxc/WHNu374dzPawIJ6cnIQ+Rt05sDKvxS0tWSdrg7QLAls1dRD8888/H9zEoM1D3xweHoa/MfZqtdpSXPsspAWLbm1the+h/Ue9L1++HOYd6gbNX71eD3XBHGMLzqKw3BE14UK9Xo+5oLTb7RhFPdxFnn/++VgWdk7qDXnCVkOtUbNcr7IizYWOrVXs0iEysbJpQhBOB6EtxRiT7XY7yDcOiEcfMVkDt8m80BpJ1qDjb5R7f38/zGm4BaO/xuNxjEYcrjuXL18OY5jdQDhYmuu06NxKS7aqg6ybzWboJ225vnXrVnBDwv2o/9HRUUQrW0S50+phBXmLRN3DmNBFz2krnUISZTjfnyan56kv5gK7/WDuY4zs7+/HPF6YYEFbYyxPBGu/wK62SdateawdKA+7YOo+aTabsb0DW5a01WlzczNmpWK5qYkMmLBK782yJkYFrKTZGBtM3ASLFFzmO51OmCew4CJtwGuvvRasu9gfcmJbPAPy5OnTp2EfjH6zLJBZwR4RGBva4n52dhbqiXfv7+8nEpmwBRd9A8+WSqUS+gZ92mw2Y+kutAUpD5jAjT2euG5sYYXs5fmivV2azWbMLXhWKgHMJU3gwknd0+AWKYfD4XA4HA6Hw+HIicymD63t45genN7gn82JDRHI+2u/9msiMiGfALkEtM0gnXj48GHMj1VEYj67jHm1s+wLrP1AmWyCE4lCkwGNHlum8B1O+py0DG3GNOv6NM3BcfPQSOr6Aewvq60CGxsb4f3QaMLycXp6Gu5DjBS0EkdHR8EyghN8s9lM1SbNC47rQP+gXDs7O8Hihz754Q9/GLTJ0CjBIvWDH/wgUKIjTg9kE5///OdDUl9YEt57773wfGhpYA0riu5daxNZg4Y5sL+/H8rBVKwiE2sHJ+AVidKvapp6Dry2KKqLgBWHoIlOOHEgwONNE7RwEkUdYL4MWEmkOZCcA6pFJvMFbQ1ZAG3nvXv3YjGRqM/29naoo7biiMRjL+ZJ+cBjTJNkQOs3GAxCmVibZ1nE8HtNSsAxa9oXv9/vR4haRCQS61ZEUmWtoS+VSuFd0MSenJxENMZct52dnWCRguYZsXk8/thyt+oYKZF4LEOpVAqeBFiDsaZ96lOfClZ3tAFbtzV1cdEWqbT4ICsYnq1sHH/LYNIUnTS20+nE4rmZ2j+NYj4r2PvECvbH1bLs6NQWbJHScZdc77Q5pi3Jg8Egdxw5xrnVFyzr8DcTRWgrG1ufuH585fXTqqe2Jg4Gg1xeBzye8TtttX348GH4G99VKpUgj0F1Dk+VT37ykyF2V8elPXnyJ
HhUwTJ1eHgYW6N4/5x3/4D+4P2pRVqCMnG8O+7TVjmOn8IahbV3fX09fId9T6vVCp+xhXhe4Lds3dWxgjxv2GLNceUi6R4wbEHNkjqB4yCzyHa3SDkcDofD4XA4HA5HTqRapCzWGa1lYYpLaB/ee++9kCgVWlmwqr3xxhvhpI8TMCwBGxsb4aTMp1zWxoospjVjLZfWIvEpVmvoRqNR0ERBe8EWKp14DZapXq8X06zV6/VYrBmzisxbPytGyqLMRN8xkxZO6dBWHh4ehlM84lOglTg9PY3RJ7PvvtVPeTW2rFXUPrgcZwMNMmv/kYAX5QbTzq/+6q8GbTRYrf7t3/5NRCYxcHgWGHnYogMrj0XnnBeWtYOtoRgvHGeHcmCOwZr73HPPhX5iC5bIZJzqscdxI6uKkWLNG881jtsSifpowwKoY9OYdn7Z1oAkbTMniYYM63Q6oa2ZFUpkwgil2fqAtbW18Axm79PywWIDywqOf0mKq2BGLtbicVJQkagPuY6vYWuCTgDJGtgi6M8ZSYleUU4RiaQBYG2sSDStAuYVrDj4jpO0s+Ww6NioLNCa/MFgEOY8xhCPN1gIsEaxJU5rtEXS51MR9dMWqVqtFhk7IpO+0G3K8pa9O0QklpqEYbF+LQKOsdBrE6/j+p3j8dhk6xOxLUGskU9jNdRyap4E16DGt+KgrHgo/iyNslxbPK21h/cobIESiVrb8rDkMnMwfse05LhiXQd2dnZCWyC2EPvTW7duxdZazKUHDx4EixTmWbfbjcV3L7J/4Hgo9LVO4TMajWIWqX6/H9of7cp07Pgtyzr8Du3I1sYki9Q8bNqW55b2zrFo9y1WTy6XTtfB8wzjE+OVLbj6d1nT3GTqSSsrPVCr1YJrEQdQg1IWblL4//DwMByyEMjLExCbCh1cqcsD5BXs6PRZLgdaqFWr1bA44RnYFLHbH64YqMfHx6Eu/G59eNNUjvNCu9vwQU0POBaCaGdMvv39/VB3HKBQ/729vZhrBbv6aGE5z+LFmywtXHlSYMzxJhRupTDDw9Xvi1/8YhiHcPGDW+mdO3eCCR8ugY8ePQoHNL1Q88RfBBYVOeqJzd/JyUnoOxwwULfd3d1IwKvItA/b7XZM4LJr37Jh5YlhwaaFHMsBTVnLC6sOCLVcdxbdMPHvNekEuwVj/mxubsaCWDEmHz58GGSk3kRWq9XYhoU3YZab7LwEO5xDTOfgYNmLed7r9WJB/LiyPAGs3DL8fO2ewe1ZxAbdcmvWbk/dbjdG+Q0FSqPRCDId8wztb7UFL9jLPkBZeWLwTnbzYjdFkeiBBGsTkyWkKVWKOHRY5dYbpnq9HiMsYZdkrQBk+me9DrXb7dgB3QowL6puWtHBckqv+9aBjpUz7OooMt17cC4knkN6E71IrjmknVlbW4vJI77qwxLPa4uyPCnUwFKQDgYDkxwA/+c5SPFhQKd44dxOnLJHZLKnu3PnjohMD1BIwXHp0qVwPxNWiExcavFcznuq+5TbaF5K92q1Gtvw83qJPQravtvthu9Rfg7NwDPQBvpekWh/63CUpLxjWaDnMdeJr5byTc85DgHSJEAAH8os10Erj1SWerlrn8PhcDgcDofD4XDkRGb6cx38Z2U6hhbv9u3bQcuH0/y9e/dEZKKlhZYfWgdoazlZLmtq0rTMeTVLTF1rJevS3+HaarXCCVYHULZarZg2lzOQw7LALiFJgZOLWgysBG26Tkx3CZOtTsb57Nmz0C86EW6z2YwlZub6JJVh3vpomk0muIAWBWXt9Xoh2BPWJ1jUbt26JV/4whdC/USm2qkf/vCH4RnQwF2/fj20i6bZrdfrC5FNJFE2j0ajWHLnXq8XtCgoIzTnGxsbQSMDTRj6kLVN7A60LJKJJPD72M1MW2fYnS3JXYCpj3kup7n55amfZWGwXPxQD9ako8w6ud/BwUHQXEJmYN4Nh8OYXOn1ehEq9EVhuVZasNyBtWzkK1Mx85VdCHncWYlYcX8Rc0nXg9cm1l7ifk1B3Ww2g4zTCSyZVjjNlWUZlrVZ37P7iXbFaTQaMTcppmIuwnsgCywPELZg6qBwJkJCO7N3CJBmkWLqe601X6Sead4jaSkurH0FjyGU11prdELeRqMRs3Kw+1Pe+QSvjbW1tZjrk2UZsywret/CLoZ6r8XWJ8vqpGV8Xtc+fo72FsKa3u/3Yy7aL774Ygg9gccUrLsi07UVIQJI+bO3txejOueksVkS286CRc4B8FxhogTUUxOwsBVQk3ixdUu71bJLot6XzbNvteaj3j9aHgvj8Ti2H7RcH3mPgN+lJcPWZExcjjS4RcrhcDgcDofD4XA4ciLVIsUnsaQkXCcnJ0EDCw3Kzs5O0CzB9xaWmidPngQLDZ6F/5miO43qfBFtEludkuhDu91uLCam0+nENDSsmYLGAZoN1kRw0k48Xwenal/rosDaWa05aDabQXOpE7Y9e/YslhXS1vsAACAASURBVGQQlp0kKmqtqVskmNxK5Ki1KdVqNbwT46vdbgetEeLzmCoc2iZYSBFP9cEHH4QEnRizm5ubwQIEyxWPmXkTiVp1A1jzjbE3Go1iyR1Rrmq1GknAKzK1ZHEyOW31ScKyNNLWWNB1hxaIg/i1VprnLVtHkixSi9RH/9aiIGeijCQrUrfbDdpQ9J9FGcu+20nlXiTekH3NrXgVK15Pv9cKDNfxJ1lj1tLGQhHgGCYuD+aCRb+POY3fYS5y0HqaFpWxCuIJEduqgXqUy+WY5ZDvXVVsF0NbjNjiwvNDx2pwTAOTQIlIhAjEsvJri9QiRDW8ziVRNg8Gg1An3l/otABWLIe22FWrVTP2WXuYLNKX8HBgq1NaXA+vIyy3uS7WHoutCGlWJ6ZyxzUP/TmnrWHvIP6uWq0GjxaQzNy+fTt4UcESxUmXmVxCZEp1fnR0FLOY1Wq1mAWPSTfy9pe2BInY3j96vjOBmabR53h97SlgkcyxJU3L9nlipPKmJeB3Wh5KXA9+Po8/vce2vGNyJ0vOemPSZGUTLQfbMe88X69fvx7bvLNwyyIcFmGDszpbT/ZKpRLLAM1sNhxALRINnNfl4ckElMvlmKDgNpj3MMVucNYGXbdtrVYLfYHDLDPc4DMdjLixsRHbPA2Hw8LZuPCMJCHQ6/Vih9vt7e1QBwhOHJparVZw3UFAKerY6XSCyykOw8yohkMNHxyLcEeyDhN6PJZKpdD2ODCi30SmY1QvFLxp5M3Fst14LKSNBT0nWUhafa/dRXjDzM9YBBazot6Ui0QD5vXGnDdJ6CMcqIBarWbOyyRyH57j8yCNtU8zI6UxPLKc0m5/vOnhw5XFYsbXRerDsNyrWC7qDYOVw01v2pIO66s6LDHS6mwpSpI2K3nLPk9dWfmgc5GlMUZubGyE73XesUajkUhG0+12Y89nV2F9cFnkIDUr/5Hl1matm/hdWj/pscpuYxajX96+wvpiyQFWZOoy8r5CEwek5bpi92W+J82VOM++COv1yclJxEVeZDpHNjc3w3qKw9PNmzcj5DP8u+Pj4+DSB+Y/yPNOpxMj
dmImxjTWwqxI21fxZ1mYRFku6oNU2qGG92P6ffPUid+V5SDG4y7tIKUPg3yv5ZptuRPimmWf5659DofD4XA4HA6Hw5ETuRPhaMriSqUSs6602+3wmbYYVCqVmEtUkvZVZLnuE/rkzlpIaCFgmbKykjPFr6WlwrO1JpZPuVprwDTi8yDJ2sAneNYEQouO/kFZj46OYjkW2L0MWh426+u8CEVZptIINNjND2WDNVRnMt/a2gouDLhCE/Xhhx8G1ziY669duxbLccRjZh5TdlLdeAxqF6JarRY0hrCQYQwy+QLTrorYuRkWtWjMg6T+t4KxcU1yQWTrk2XRWEbdZmnoRKIB8zrQtVwuh3GKeYP+aDQapssV5wVZRl2ytB27PGhNnUg8v5+lxWN3R+2iu0hurKyw6pQkp7hu2iJlWT3T5Nqi7sxAUTTdq36nhrW2W+lGIGfH43HMIoX72SIF4J5erxeTeePxuFDXPh4nWchbAMvV1LJgpc1RK9zCGr95A//5udpSxNDWJ8vVmi1M+lmWtSqN5IvbKU9fYS3sdDoRKn2RaK4y7APg2nflypWwz0G5ILOfPn0a9gacYgTlY+8EXNPyH+VFVpmSxzvDct/LKy+yvGcWsq4BliVUE5RwzkK95vD+yiLMsyxTWSyhbpFyOBwOh8PhcDgcjpxItUhlOZmyHz1TxuIUpxPrcjwRYGXpTivPIhYODmDNEzQ3GAxCXXRAGmseLGpfIKt2pajYIpFo3bS2p1KpxKhOgdPT02CR0rFha2trkaSUeKYVz1EEkixS7PuO7+r1erDeaI3FkydPYj7L0Eix5gqWqbW1tZgVkjVqi8SyJfkZs+UQsOiB8XsO0OXYKJGoZSCLJqxoTfSs/k/SnJVKpURLjKW5WrY1I0tMWblcDmNKx6vw79BX8OFnQhD2Nde/LYI0Y9Z4tTThevxYstrSpFvWmyLjieZ9VrlcTtQ+iiS30TKtZqvCeZQ/bc1mra+miWbqZR2DwcloAY7PscaqFVeJ8s3bLmnWoaxEKtb6rMvMZcwqD/LOD02Zze+30iFwe1uJdfV3aTFPVqzrIhZDrk+v14vFLmE/s729HdZ/Tn2C+2GJwj5ob28vxEQxQZqIPYaZKnxVlt8inr/KuE9rbU3bR/L412MKfZ7W7jyG2Uq96Hhzi5TD4XA4HA6Hw+Fw5ETmGCmL/UgkGoeBk3ilUkn0RZ7F7pbElJX0Wd7Ts6VRtZ6htUPD4TBm4cCpt9frxRh02EpnWQOK0rxkxXg8NinWNQMh6tjtdmOsfdBOt1qtmN+xxcoFFBWXY40NzRTE1hudIPX09DT4NoO9D+x3Ozs7QduE/j05OYn5n8PqUBRNvWUF1RpYjsGz6JmtpMj4fRHUuFkx7zvmtTCdp4XAsmpY7G8iUe261p6J2HSzWRma8pQVZeEryyHNwseJPS2ZrdcEfrYV61B0aodZSBofWWP25sF5sPhZuAjWP0Za21YqlZg1VyRKX87gtBfaasUxOxxHlWSRmmdMWvNUxwvyd/x/0rwej8cx1jTLIpVmSVtk/ILxlddULQ+sBN2DwSAxDQLvmaz2z5IiYV5wnDHGAdZ6eHXs7u4GinN8Vy6Xw94G+wbsGfb392OWKIDZDtk7KWn9/ahbt4tCmrdHmoeYtZ/lPWDSumV5ZvEYnnc/nptsgiuCF1sBWlrApAU1ZzH/W79L+iwNXMak9/LhUAsABvP1a2HCgbJ5aMGLmmDWxLXqgkmPwwe7YsK0jcMWu1ZwnhKRqGAscuGd5SKhFxgmKkCdeEGAINQH383Nzdgmlw/IrCTA74omAkBZtWsYZ97W45Jd+/SmYJ7s6R8FrHoRslz8rAVAuzgD1maNhbieN4soitLKz+XW37E7pSXH02SY5XZkbfwsmnu+rhp52va8y5i2Rl7kTZlV7ixKiPF4nHiQYsIQ3R5Wzhz+2yK2ytt+1nP1fkfEnjPW4Srp+dZ3LP+zbEKzAu5rSbTt+F9vQmcdltLmfFoZ0w6hWcAud0z+IDLNh3np0qVwqML4Y4IM7BWgYD09PY25QPI+As9PIh7j3+m//3+FtR+3rml7LT3uODWNDrGx1iYe1/Me7t21z+FwOBwOh8PhcDhyouSnYofD4XA4HA6Hw+HIB7dIORwOh8PhcDgcDkdO+EHK4XA4HA6Hw+FwOHLCD1IOh8PhcDgcDofDkRN+kHI4HA6Hw+FwOByOnPCDlMPhcDgcDofD4XDkhB+kHA6Hw+FwOBwOhyMn/CDlcDgcDofD4XA4HDnhBymHw+FwOBwOh8PhyIlq2pdvvfVWyNZ7dnYmIiJI4ItruVyWer0uIiJra2siItJoNKRWq4mIyGAwEBGR4+NjERHZ39+Xo6MjERHp9/siIuH3rVZLNjY2Ip9Vq9XwrtFoFCsLvnvrrbdKWSrMdbroyFqnt99+eyV1KpUmxeEkznkTOmet09e+9rWxyGT8YAyh/wEeezyGMA55DAF4xnA4FDwfV3yGe3h8oe5AqVQKnxXRT1Y76nfOuj/td3mf/+abby517M2bCDytzLOQpU5Z6sNln7ets/aHvo/vydpHf/InfzIWEWm320Hm4jnNZlNERLa3t2VnZ0dEJFw3Nzel0WhEytHr9URE5OTkJMj009NTERHpdrsiMplbuL9SqYiISK1WC2sC5mO5XI5cRUT+/M//PFOdPv3pT4/xLvwez8W8X19fl/X1dRERuXTpUqgT/sZa02q1QltomcHtjXWH1yH8rdem0WgUPstap7xjz0Ke+cHyLe0Z1jOzjr0333wzvADPsa74m8cCYO099D5kEeDdy5Z5s5BXpmfBInVK6i+R6bwul8upfYc6oQ+5L9P6ddE6LWNdWmTtSUPWPvqLv/iLWOH0PGC5wzKJ9+siUbkMmQf5jO9KpVKs/0ajUdgn6T5lvP3225nq9Md//MdjlAvv1dd6vR7b5zUajbB2YY3i//GZ/l2tVgvPZRmv+5brhvr+xm/8RmKd3CLlcDgcDofD4XA4HDmRapECLAsETmyshcBJr9FoRKwAItPT/HA4DFpRXPHMRqMR02SWy2XzxDsv+FlJ2ocitFxpSNNsLEvrMQ+yaJryapPyvpu1ItpiJCKmtUprHFgLA60LnoXvuN21BZQ/s6xyi2AZbXaRsWh9Z1mDlolVWtGKHBewwJydnQWZ2+l0Ylf8jTk1Ho9le3tbRCRm5WUtIeYULFRs+cI8Yw00rvidSP42sjSG2irEz2UtJ+SCdeV1h39XKpUyy0H+/aphWdCTLAtWn1jPWGTOpXkv6DbmzyzrB2vF9T7EslBlLeuy1/usSLNaJ3lGrKIcSZYoXC2LlYhtOeR+S/rOwrL6KM9zz3PtYVgWFGt8pFmR2IpvWXREotYbq7/xTPzPfZoVbAVjK5lIdK1BmbisKK++skVKP8uS8QztdTAcDjOdPzIdpETiByj+3HKt4EWSCz0ajYILCK5oACzgIvYmHn8vYtZH56c9xzoULDKRLUGTJKwuymaYJ4w+kIhMx4GeoIxF2gzvHI1GkYkqMj2A83dwORqNRqb5Gld9yOKNkgV
28+MrH/BWiYuy6K8a5+FysSosu/xXr14VkclYRztC1p6cnIjIxD0PLnrtdltEJnMK8/vKlSsiMnGNw1W7T/CiiOfyXE3aOPG8zAvLvU7PWZHopj3LQUovtrPcz6y6LaNfLYWPBes763BouSsWuaHl9tBrBK/n6ANWpOpDVZr7kqX0WuXhYxm4KOW21jx9gOW/+R59X1al+Crq/lFdS1mpY80RvacZj8dmKAN+D7mNfTiUZq1WK8h43i9pOciKrLxGDz7g6HWED0+6bM1mM/I3XxuNRsylzyo/t4+WI7znzKIUc9c+h8PhcDgcDofD4ciJ3BYp639tkWLTGk6oOOENBoNgiYIrCcBayzSzvyYdyIO0gNU0F8a0ZzHSNCl8ss0SeHsesDS3ONWjL1jDwf2qtU9ZtacWoJU4OzsL2hNtmep2u2EsoGxcDm1R48BKPb7YAgewdtZyIVlWPxX53IuidVtEky1iu2uxZuwiaG+zWM3Ooz9gkWo2m2FeafeMg4ODYJFiK5Xl7icyIacAkYMV0Is5B3c/nqt4Fs8p7cEwC5Y2UVtXuK3TXPuYBENbqi2XFqBcLsdc0RfxlsiLNPnKLnv4m4O3cUXdWbucFkSeF5Z1wrJM6c/YIqXDBNgKyV4s+j2LuPstC0nj4rzLxUjb67Grrki6RdiybrLMttxok6y+F6l9zgvW3kPPEW439FW3243Je3jwdLvdiGeAyJR8p9vtBg8EWH9qtVqihaZUKuXem8OKxK7i2o1vbW0tvJ+v2iLFv2MvJBFJ9XhgLyb+DMgiB90i5XA4HA6Hw+FwOBw5kZlswqKvxBWnXfa1xOlQB/WPRqPggw8NKMDWBIsaV/svzgOU0dIMWZ8VSXQBWCf689a4aC1/tVqN+Z6i7TjOjf3W07SxeesH7QSeLSIx6ubRaBQ05rin3W7HNN6stdF+uJbvLIMpnfmZVhxdHhSpsf4oWLDyWGRYq2zFvWhrIgeqrmoepb1HW2St2MK88RuL1Gtra0tEJnTg0DZCPnMg/5MnT0RE5NmzZyIysSbBOoUrxyLu7u6KSFRbKRK1dGB+HR0dBbmv+7Tf7+cedxYZjU5twDKJ79dxkqy91BpM1ppbYwx9q60+i8oHIOs4SYvx5D7jMq6vr5sxEPrdSf9ngWU5tGQp3s8WD7St1i6z5luXm9NYZCHSWCXyxp6tsoxp+yGLtABrbK/XC39jfWbrtSZ7YRmhLdnc58uMGS8i3v08wPNFry24sjWGvYUg99HW+/v7IhL1RIB8xrXVaoX9DwiLWGZYe/S84PWILVAi0Zgt3McWKSs2SsSW47PIxJJi+XjMp8EtUg6Hw+FwOBwOh8ORE5ljpADL6qA1R0xNqONbBoNBsCLg5AutEvs7WzFSRWgD4PPJsUj6dG/FKeWNXUpjs7His4qydFjvB9LKb2mOcPpHLARrDTX7V9GaGh3LhPeKRJOC4jO2bkKLAg0ZwAw1mhkmySKl29GiYF8WLCpTbSG1aJlnjZ/zpm4Vma0BZY2nSLQvdexevV4vzLd+EY211nSxxQP9pq2lHAuyrPgatkAgCS3mNDR9HKN0//59EZlYkWCJYmpzkUl8K/rkueeeizxza2srNs+YmQ/PSEq0nQVpTKJs8bKsg5YlCletyeSkjZa23Fqn8J4iLdaz4nx02diShjmk2/309DSsh1rG87MWmROWZUF7GHBcFuZ0r9eLeaBwv+m+w3e9Xi9mGeE6nCejX16m1ywxl0WD9x+ambff74e2xVjqdrthP6e9N0SiKXFEphaFJM8CLofIxfK2OG+vIWba0xYUtCV7A6Ctq9Vq6BOks8A60Gg0gifC4eGhiET7Vu+l+v1++K327pknvp/LYcVBiUzWKJwn2DKlLVEWxbkV926lydDWcrayQn6mYe6DFG/srIMUGgGFwD2DwSDm2qdz+/D9TNmbZaM7CxhI7A5k5R2ygo2tHBdJsIJsuQOTAjSZ2nUe5HEfsOrBi5YO/uNFNk9bzANeILX7DG8EsNHD2Ol0OuEzCHieMLqveVOOegLs0pp2GC4S1gGfy6EPrvV6PWZiT9tIXhRYmwpeuCHAWagDup94Luv6FkEKkpVEQh+SePOgD4R4ZrVaXToRBeZDo9EICxeu7LKBhQlteffu3eDmB1l9dHQUnsk06SIit27dCs/E83m+Ya4dHBxEntnv93O7ULOs1odnnrOW4iGJbII36Pq7SqUSc9+zXPyYfGJZ/WnJcZRFb1ir1Wpw7cSGCdd2ux1T0KyvrxfqJsubY00CZLljA0dHR4lEJ5cuXYq5k7KLn16b+v3+ShRfWZDUptZ6Mst9eBmHDJ4zem9i5QDtdrth/lsu9UmpSHjDb+USKnJ/kWUeWqEqSdesz+T7iqwHKxV1vj6R6VzAoWNjYyPIdvQNCIguX74clClQoD19+lREJvNS768Gg0HoZ8h4Dv2Y9yDVbDZj7nv4v9VqxQ5XTImeRnGeZqhgwhqtuGVCPE2KZ8Fd+xwOh8PhcDgcDocjJzKTTViWKEBbpNhMB60lu/Zpi5QmpuBnViqVcHIsIls8W1fyJGa0NF1WwLPlsmdZnVBX65oluK0IsFZF9ysnV7P63iIfKVIDyxpnTdHLiXmhLYDrkUWvz8HHmi6UtRhM3486JbUBB0/nxax20ham4XAYNCW4svkemhumhef/GVlIElaFLAHykBHoS55r0EQVnUQ0D1AHdkfEuEAS2+3tbXn06JGITOuDscYWgKyufXnrg3eXSqXQZrBSwCK1ubkZ/ubA5HfffVdERB4/fhx5FmsrUSeMuxdeeEEuX74sIlMNIrvUaavw8fFxzA13FtitE7+1KGz1HGWyCYvMQLursLZcy3+mRNeupVkTOSYhi5XS8nrQbq/Xrl0LbnuQke+//76IiHz44Yeh79jibbm6zwu2bqG8kMc87rW7v0jU+ikSlWsoE2uo8UxrnYY8KZLavQjweqQ15RfFigbwHMb4KpVKwTKgrS9MIGARAmj33DSLz7wyPKsstcaD5ZWU5qGS9u4iLFM8f/QYYZdr7YrWaDSCbIcF6Nq1ayIicv369fA3ZDbkw6NHj8IcxNp2fHycOE45vUZWoDyW1Yndz3VCXovinMeRRRqB/zU5Sq/Xi8kfPqOwTEqCW6QcDofD4XA4HA6HIydyJ+RN03JDw8BkE5qKsd/vxywGOEFbFimLdnaRxLw4YSclhMP/FrVjku+4pUmxfI5nxU3hOq9FKq/WxvKH1lS5XDZooQaDQazPitbwoU3Z75YDv1FGjCVoLY+OjoKmQWvKh8NhLBibteQ6rqNer5tJBkWWZ+VgrR/K2u12gzYZc4aDRnd2dkQkqpUSiWqXreD884RFkoGycj/pIFAreWeRsQJZn6UtEWdnZ7GYT9CD7+7uyr1790Rk6n+O/uMEh9a8L6JuH3zwgYhEg+8xxuErf/PmTdNKBQ3g97///ciznj59GsYZ6s00urdv3xaRqVWOtaJWkDnmb1ZYcU06PYIle633s3UrySLFHgwWKZG+FhUjpfvf0nKzPMb6BpnXaDSCxvnGjRsiMu
3znZ0d+fDDD0VEUtt/FtFFGtI8OjC3u91ujF7/9PQ0zBVcObkz+hp1Y9KUtDJqbf0yLVNphE+8volM5DisNdpjR8ePJT17EVjxflZZIQ841lwTN7H1CfNJk7GMRqNQL7YQaA8d3hcVRdTB91gWJcxzbSHhcum4MN4vZbEizyPXWW5xjJpING4KMah6TolM+why+fLly+FvXLFu3b17N8RNIVZ2OByGOkP+c3xo3vnE5EdJMVLMucCy2krZoMuhE3f3er1Y/DVbndBmeS1SuQ9S1v/aXcpykQB484urtXmfl+RhFt577z0RiU5M3ehcP4tcIEvwOV8t9w99KOQ6Lnuzy6ZZHVzHbjIwrUJoYmCXy+WYwCvatY8FThLpRalUipBMiEwmgnY50i5+qIOIvdCjnpVKJbw7LUdNEeAxwrl9RCaTGpsJ9A9vgHGQwqaImZV0IOY8AbNFgse9ziOC9m82m6FsGJ88HrSSxnKxWnYd9Pzt9/th3GFBeuGFF0RkskhgIXr48KGITF3rms1mJEcWrkXWA+54HBiOcYz33Lx5M2xKceDZ2tqKufuh7d97772waf/Zz34mIhJxj8Di8/LLL4vIZMOrF0jAIoyYBV5fLMIB1M1y5dLKlLTcNvwefZBKc+0rug912fnv8Xgc2hsumJB97777bhhzr7/+uohIkBfXr1+Pudl1u93UzVBemcFtrfuJlRAYlzgs9Xq9MJ8g+7BBtIgorl+/LiKTw79mirPWbt54rlIO8uFXJKr0Qr/osAJmyQWWJed4HmoCk1qtFv7GHN7c3AxkBZBpTCDAJGMiEnHFh/xAn5+enobv9d5sWfXV7VqtVkN9sMZi/1MqlUKZIc8xRuc56OWtE5Ms6QMdKyCwnkAW7O3thbmjCbhu3rwZ1ivUGzJ/Z2cn1B0KtP39/Qjxk0iUTTBvnfBOy7WP80lpecwkZADva9PII3TerJOTk/CZ3ju2220nm3A4HA6Hw+FwOByOZSAz2YT+2zL3s4YvyezGVIOautHSGs5zyk3DT3/60/BuTeXJ2lptdmaCiDT3Lm1hYhID1sixloevbA7PirwaNSZrwMkbGhbkEhgOh8EiBc0MTL6tVmvpuW8sgghLK6E1jN1uN+YagTrt7++H+y3SCT1Wm81mzAWQaUTzmrGzWDJZ08LuOqAsxvuhRbpz504YSzDDs2kfWkK2JmYJlC0aWmPfbDbD+GKtlEg0EzvmBbTSo9HItGSt2mWRtekiE00Xxh00ei+99JKITPoDrn3ox8997nMiMtG64XfL6gd2C9WU5Wyl/cQnPiEiU4vFxsZGxDolMq3b2tqa/OQnPxGRad9Aa8nBu3zV+aY4SH1e7SzLcU0gw2uHdvEQibv3snXLskjpHFOWZwHAruNZkeYKpp+NMuGKfoT1EZ4X//7v/y7vvPOOiIh8+ctfFhGRL3zhCyIykecYv3iWlXupCHB7WHIc4NQWGLeQ35Brx8fHsbQIGAPXr18PY5THAxPTcN0Gg8FKiSfwLtQNZC737t0L8+Mzn/mMiEzn4aqs7LjqtRFt12q1grUJxAS7u7sRNzGRqTyvVqth3mEt3t/fF5HJeEBfow+L3utlgd67tlqtUB/Ib6R1ODs7C14fKDvL7mWvo0yoogmKMP6Pj4/DPEG7fvjhh2FvAPkAmf3aa68FN2xYn27evCkiUZc69Pv9+/fDXhF15zmVNyyF3fm01wPTm2vZm0Yowbmf9DrUbrcjFlBctSWKLVLaAmfBLVIOh8PhcDgcDofDkROpFinLEmVBn+rZCmP5MaYFFOpnWjFGiwA+rhx0pq+WtWowGETiY3R5AR17U61WTS0n/tan72azGUs4WhQ0bWWv1wvaiwcPHojINIaj1+sFLTQH+gMo47IsAdD4cNZ7yzJlJevVVJbo19PT06AR0/ERVgzE1tZWLFiWtddF0tNaVPoYe4eHh6Eu0BpBi/TKK68EjS00LfCNHgwGQTvIWvTzoNXVwdWtVitYOKGJRd1Go1H4W8daQlMkEu2TZWgyk2Qea22ZfhZ/I14DmswPPvggWAgwNtEvzWYzRkGNd+QpUxpQVg6c1UQR3W43zPNXXnlFRCZWT/yt/ec56e4Pf/hDEZlqO/f29sKz2FKMd4L0AM+s1WrBSpUVabIUsqNarSauNSJTq5xluYB84/dkSf5qxTAtAis4XceubGxshDbFnMI93/nOd+THP/6xiEzlA2T+q6++alo9ihx7bPW20qSIRNuW5bmOq+E4D01UxVfIE7ZMaSvLIlr0rMhiTUS5f/zjH8vPf/5zEZmO409/+tMiMpGVaQRXRZaVrfvak6bRaESSpIpESQL0noBj3yBnMAaPj49jgf3dbjdG3mDFqBdZZ03GtLa2FixriHF98cUXRSTqwaMJgopOG2CB+0Eno+U2x34He90nT56E/R08I2BZe/DgQfCOuHPnjohMvV2azWaYS7z3gpUKeyn033A4zL0fnBUHJRJNsGul4OG9oohNZ84xUBh3fNUxUjwm3SLlcDgcDofD4XA4HEtAZtY+ICtjXRJtK58k02I0luUrC2YqK1aLT7b6lMsxUtqyw7BipLQ1g61U1jVvUrOs0OXudrtBywdtMvsAow2g2YOmZn19PUZrr/9eFBz3oLXErB2xKNF1zBvH4uEzaFP42ZbPvmXRwT1837yw5gn3D+qB9oaGCFqy7e3t4P+MK+JwONkrW9uKjAnIQo3M8gD1WF9fD7F30PxBmz4cDkN5ntXeOQAAIABJREFUtVWxVCrF6IDPk9KdtWAYP7AOQGt479690DeoIyxuHEdQRNJGC5i/o9EoaN50gmBLy/8Lv/ALoS7MQCgSTeCLKyjSP/zww6B5vnv3rohE5x76ki1TeWUeW6EsGmVcmVEK5WBfepGoZRPtoWWOlQCSLVIW+2qR/cjzTK+frVYryAPEQf3SL/2SiIh89rOflW9/+9siMrVUQ/ZhDOAZKHcS5qkP2pot4bodORZDM7NyPVHvg4ODEKehvzs7O4v1LzP5WWy5q5IfzDiKeMQvfelLIiLyO7/zO8FyyLEwSVgWK6R+B1+Hw2GYu5jfIlOLIcYXMBgMYpYoZurTsp29T1bFpKjHg+VNxYnuUX7MIXiDDAaDmPdE3hjJWWBrpk5ay8lrUX60YbfbDXsCxEahr+7fvx/2fLBMffKTnxSRiXzWLI1XrlwJz0X90CadTie3dTeNFRWwGJI5TZCmgudYYG1pmmWR0ilz2Eqahtw7QaZc1UgjpbA2XEkU4EkoYnJ99rOfXfgZFrLmLVgG0vrEej8vNDoPkxU8mSXzuPXZIoKe6Su1ayS7hDBVOcrA5nbUU2RS7729PRGZboAhCB8/fhxbzFkQMlkD6la0oEwqf7lcDodYBILC7fLk5CSY62G+x4Z1e3s7RhXONK1Fj8e0g4B2pVxbW4vR5mJD3u12E/P51Gq12ObpPIE+Go/HMYUDNg/vvfdemGc4PKLufPDn+VbkJgmuGuw2inGPhZVpYflwhbLhIMV5obB46wPV9773vTAmIU8ePHgQc8XAOH3uuefCGMgKl
glMPMHPbTQaMYUYK/I0hTAfpCz3M4uAwlKw4P+8fZgmX3nM641DqVQKB/RPfepTIjLdFH3mM58Jf3/3u98VkamipVarxdqCDzBFjEEOBNduluw2zbmHcOW/UV6RiWICCkCtGGA3X66bzhHIsmpZbsFpchAK3a9+9asiMllf/vVf/1VEpv2EDeqq6dn1mOCDFA5NOMiyYg73WxtanXeJXT0tkrK0VANFwXrecDgM7Q7lF+pwcHAQIQcRmR6oeN5kKfMiSglWtmNuYD3Z3t5OpaFHnXCQOj4+Dn0JuYDwjjt37gRlF6eswD4J8p7XLZ2nbRYs4h4rx6pe8weDQUzes7u6dYBCfS1iCesAJRI1qqTBXfscDofD4XA4HA6HIycyW6SynKDZ4qGJGVgDpN2N+FRq0WoXqZF59dVXw7ut7PX43yqjdg+wCAIsSkadzHU4HMasDvx/lhOwhbwWI/5fZ/OuVqtBswHNM7td5BkPumxZYNEUp5F2sOlZa8is7ONM8y4y0f7BWsVZtLUGk4lAluG6w3MH2NzcDO+F5hkak7t37wYiA2jScO/W1laoC1stl63htCyTaeNRU8menJwEaw7qqeUInmu9b1ng/tYEObVaLQTuYy4hePyDDz4IGkNo+DBuu93u0sk/MGY4aBeARvXg4CCS1BpX/I3vQHJy6dKlYP2ANYEtVN/73vdEZNoGrPnUlOT9fj8QdGQFxjhb9FAOdtW2XLR137FlCjJGk3Gw5cuySGnL1KKyQY9t/l97ETx48CBokSHX4OJ39epV+cVf/EURmY5LpAA5PDyMEfkwinA1ZWsGYBEyMXGGSJR0CVf079raWqDah5zAWDo8PIxY3ADUE/KQvQlWlex1PB6H8YTUAeiTGzduhP7UY2kVlnfLS0HLXCYAgRWg3W6HcusEu+12O5L4WEQirmLoa76ifwB23c/TT2lrg/Ucdt+DnNKuqPv7+yH5OMYY2iErWc4iY40t0GgXPW+q1WpYh+AZcfny5eBKDosa5PLh4WEkbYXItP+ePXsWKOAhnzc2NmL7X5bF82I8HkeslSJRi6jeQ7M3lZbVTGduXbX31enpaSyNAoeDZKmXW6QcDofD4XA4HA6HIyfmjpGywBo+yzddZKJl0ZYFDs7TmgTWoBcRG4GTu+Xzblk6LCtVWkyX1naORqNYW3AQtCZEYOr1RWCRGOhy1+v10B7QWnPfwI8b2gy2Vi07QBdagFKpFLQFaQk0oa3c2NiIaTQsixSeBS3GaDQKmgpopNbW1mJB8EzZvMg4TJpH7A+Mel66dClovFBuxAR0Op2gjcb96Mutra1Y4Ck/P0+5FoEVAN7pdEKcAzRasI6cnJzENOyYJ5Z2aJYldll1YjSbzTA2oCFD/NHx8XGwRIEsBPXodDrhWUWmeWBwgkxthcc83tvbC3MB/v/dbjcWNwVt5Z07d4LGE4HzTG2v6dJ//vOfJ8a1iOTXZrKcwtzHfGcadMsipWNFOVmv9rdnawjKzTE7STEdIsUEm+uxwH2IcfPw4cOgaf7Rj34kIlPij9dffz1oqAGOz0N902TCIp4FvHajD6wYaSvxeVJsLMdXIi4UsrtcLof1ArKj0WjErDsYI8uIc00CWxMx9tFvOzs7YS220sGsomxAmsUWWnym0sd6CfmtvQlEpn3IY4/jZXUZNIqQiVn2rv1+P0KewN8dHx+H73Qia2t/WDR4rur4dbQvzw3M+93d3RCbiz0d/r97927oP9QX8n80GoXPMJeuXr0a24swoUxeAi6W+6iLtkzxHtpKp8HJc0WSCSVwr76/2+3GCOdwnbVfCmXPU2mHw+FwOBwOh8PhcMywSKVRW7M2SVsA+v1+OF3ipIcTfKVSiSVQnEV5WiQlJk7fbM3Qieeq1apJG53kB2/FSHGb6PbhOBitMc3KEpIXHJvGfrVg4UKdcMqvVCrBEoUra/GWTb2qtWFcRotaHtd6vR40JqibldwX5UdcVL/fjyRXFZlo2zTrHd7dbDYLoT8HrLHNllpo7TCfYLE5Pj4OfcbWVpFoIkdLI78sWHNBWxwODg5iYw7/93q9WJJhZrbLorlcNL4jKwMmns8xHZAxGFulUikk52VmQpFJXbV2cZ7ypIHlmkXhjXfD759jbziZtci0P05OTgJNLrSbuJbL5Vjs4sbGhrz//vsiMtVco0+fPXuWu59YZqPdtUy1LFIWxTJrPjUTFGQB9y8nw9SeC4vQn6eNWZZ5Okl4v98P/YIYDlj/7t+/H+IcmAZfP5etH1mtU3nqxO+w1jddZ06xgVhdTqGAcYXvMHY5YTfex4yU2gLFSd2LhBUXyom80Ye81uB+fLcsC3UaWL7qNZj3JuxBY3kdiUz2GWhvrF/or62trWA5wXrNSVl1/Py87IpZf8N11l5C7E2B8qDMLP/1HqHo1DBWeh5mtsN70HbYD6yvr4c9HLxVIKtv3LgRUlRoRmP2DMHze71esHRpxmSR/PIBbcxzULP2MUMf1qN2ux2zSKUl2GUrlKbdT/JcyINMO0EeBFrosMBgEzB+o6l9q9VqWIjwGf63qEuZrKGIDSA483liWldLiCUdJtNgbfwsd8VlkWtYdWLyBgguCAYOaES/JB0m8pYhD7T7DZeNqYb1QQqfi0wFCYTGYDCITRDU//DwMEb3yq5NlisIfrssWJSqPPlFJpsEtC82Suyiquu7aipdkejmlRddFtgiUdO+3tDqxXoWlrn54DnKCynABA4ik7mFDQT6lOmAl71R4qBpzHOtPOI8anA7YrcdHm8ik4UJm3ccqBCQvL6+HkgpMF/Y9VEfqIbDYSQvTRag/DzGLeWUptzm4OG0oGbrQKUVgHyQ0kq5Reia01zpmCoc8317eztsbtC2mFP7+/uhjOg7/D6JBCZvoH5WWDJdv4fXQWzU0LaQ5yLRccXfHRwcRFzKUGadc4ZJDIpUiOn3amgqd6aV1jkRgUVcK4uAdYjncuMzfVDnMA4cpPC79fX18BnvL9JIW1ZRd2v/xQdEnSKF91SWe2+RZbZCZ6ycmfqQxZToaGvI4t3d3eDuB5c+yHyeR9aBH+/JS0LG4HdYByjUTbtct9vtmNs5H56sVB64WuE0ae60Wdx/3bXP4XA4HA6Hw+FwOHIiN/15GnUknyBxatWufdVqNZZFHaf8Wq1mWqQsak7r/yzgAGft4sH/J7l/ZH2vZQkCmOadXSTx/7JcDbTWlK1O6BNoCM7OzkxXOi7zMsFaUa3BhHaB24rbDOMJn0ELv7u7GzPXs6sFtLiMpODzSqWylGDg8TieyJE1x9oqNxgMYu4TrBlLsnwuE1rzzGQTHMTJ7gIidlLlIghmlgnWZnNQskiUGpdp80XibiMiy3PlsYK+9Vhh6zRfoZ1EneAG0uv1Yu5+r7zyiohM3EUgV0CuUSqVYu+Gd8D+/n5ugh2U++zsLDwXY4vJJyzXPm2R4qu2nLJLCbuViESTxloWqSLkuB7/g8Eg5kWwtbUVLFKwvsNNp9vthrLpOTjLRbvIAH9r/eR+sAgO0Ae8PxCZtDtcldjdT2TSJiAEYBdC7cKI
Z6/CTd2C9qQo2hNlEWjPFYDJBDDHtra2YuszE7BYCdXxbL0HKmLfVSS010HaWLEs0EWPK5YnSRYpllN8hXxAvzG5GOQI7oE73/7+fsy6y66PGB/oM04XkxWQpeyhYtWNLVG4JlmkTk9Pw334HbvSM7U5Xxl5E0G7RcrhcDgcDofD4XA4ciJzjJS2nPB3WsvDSSY1vWqtVgunYg7cF7G1uxwjZWml85762X9Ua/v4PZa1Ko923LIwcRtqKw/fUyQlK2tTdGLbarUa6qzJNQaDQayeedu6KI2M1mBCo2C1Ld9nWaZA2cyWEZFJ/RHTgjHLliDcD81GrVabW+OctV0sWm+tuR2PxzEff53UWv+9aLnmgR5LnNwRsMb9RdDSZtHacxyG7iOLRp+1YMueVxizrPHlOB+RiR89xx2JRC0uoHLH3Hjw4EGoA7R+sEwdHR0FundYure3t+WFF14QkWk/49kPHz4M8VJZwfJK18myamSxSLFmXXtZWAkgmYDCSuWxDIsUJxTGd5ubm+H9iFOD7Ds5OQl9ZslKixBhGWBZqq1UvLayR4TuA943oC4YX/h9pVIJfYJ+4rmmZQzPiSIxK66p6PizopDkQQNwfJ1INDbIsj4lxX0xpTVbSrUMLTI+PiuS3pV1b7asftSpTESiXmC4ass504HDkot5U6/XY4RjkB2XLl0K1l08nz2VLEIwvc7NAsrFsprrIpJMdc4Jdfna6XRMq5x+D6/TSYnUs3oWFJJHSrvxdbvdWKOkBTynufZZh5oiGKw4D1BW1748759FXKHd5BZhe8qKNFKNJDc+xiqFmc5RwO/nA1XaeNS5HkSmAgRmbBYCGI88ufV4ZJefVeUgSdv4iORje1tlH/KBTi8CXGbdXxdhQ5EVPJf0AQrfcd61ImVZVvAiqDeNHCiMYHGLEQ8bKLiMdTqd4PbHOVhEJoscDlVw7VtfXw9zD8HNACvXsoLlg97I4VDB7KjMxqQVc7x+Ja0Jg8Egtih3Op3QPpaLX175kDbPWfZp15d2ux1bU5k5DX9nYaJaJrGBPkDx/xbxR5KbJbsQsYJWZOruyJ8xsxzAfbNst+GLemhipJWH9ypa4ZqWj5PJB3T/DgaDVLl/Xq7ci8jjZfcpH0p1u3JOPBwoWPmLz3AwAvlEq9WKHaCwN2JiG+yJmLDLOnzk7TesExbBFMtbfWjqdDox971ZhBIon5bxFqEJE5plORy6a5/D4XA4HA6Hw+Fw5ERmi5TlLiSS7NrHOS4YnANCU2PyyS/NlWERMG1n2ul5GZr8rFSey8qMrTVATEud5O7CmGUZse6bF6wZ0P3EWhg9vtiFRAcrs4ZM04CythbjkC2r2n2Otd1562RhGZT35wXL8mlRDCeRl1iEG6vCIv2Q5CbFAat5n1/EuOBM8Xp+s1sc+ogz12srD1um2HVEZEoeMRgMgnYQ3127di3MNbwHbibj8Ti39YY1hoCWZcPhMJSXP9OuHdzGmuSEZQ2vb7hqdz/LrWkeJBGPMPkOu0axtUbEzg+VRqu9SliWKcsrRMtxtmboVBxMUa2JXSqVSmQO6LKsKo8U46NkpbJCEtI8aCyXL+2mybl7rDyP50GStAhW0XdWTi195XZlbzDIBy2XNzc3zdQuIhNrNtYCzvGowyIWAeeJtFz6UGZtfWq32zHZy79PIpSwLO48rnUeM7a4psEtUg6Hw+FwOBwOh8ORE7nJJqyTt0V/jvt0Ql72h8epjyml9TP57yK0E2mny1VqhFatYRmNRpmSKWcN7ly2FcEiN7EsUyg37od2gu9HnRqNRqyMnNBRZ2avVquphAh5s18XiYtswbLi/ixNcJpMWVUQ/DzI2/bLmBfzYDgcJpKWnJ2dxYgTGo1G8JfXsrrZbIYYKWg3IeufPn1qxk0hqBmWKSYMwHvyolKphDrpWKlms2laqZLioESm7Yv72XqiE/+yRUq3Xb1ezx14nQaLvIi1rNo6nhQ8zcjqYbAscNtqKxW3t47BZuugtmrzs9giqGNdV1HfvFboiyrzeJ7o+YHPRab7v7Q9Isfy8N+4Z5X9Y2GWJfE8wQQfSTHtHLvNcUGQU5qwq9PpBC8tjpcVmfQBns+yzJI/uOYds5ZFSluamGyCP2NKc75aXgcAlw/1TCNMcYuUw+FwOBwOh8PhcCwJuZ24s1g1er1euE9bpKrVaoymNqtFqgifzDQtuIUi/afT/NUZRWpE2A/dsjZp1pd5rX7LsmqIxPvA0lpZ2jArJkxridk3FrFUbJnSrJNJZVoGimzTZWnZ0rSus2IC89TPYthZBizNdl7M29bLqhvHkSTNc/6OLbjQ1MGKBBldr9eD3AZ1ORiVhsNhYIcChsNh0CAiBcHGxkZ4ZhatH4NZ+7R2FnOcrWxWXKjWkuvyikT7Usd+cJ2wprGFKq9FKm2M87ql68v04fpZSc9Len7RyGKVmfVuy4Ko03akWb/5Pm2ZWgWyWNxn3X8esMqoZQTHf1peKlbsm/7uolmA8lgSz8OLib05NEtnuVw2U9nofuM0A9g7sSeCSHQ/zu/WsVRWXGNWwJuh1+uZbH24WtYn/K2TrlvykNtHy896vR5L/cHJv7OsTZkPUmkDxqIp1ea/8MJqNXQUOjWNLtOi7C0CWYM9z4Madd5npAnsWXmwzotudBayHKgYFgEEHxZ1MDa/hycPfqfHMb9zVfTnWXGeC3Cam9BF2RhcJKxSUWG5x2rXO5azfNXzBW4g5XI55u53eHgoIpPFURMzPHv2LOZWwjlN8h46LNdf3aZWndi9MS0/IbuRaVh5qrCooy16vV7uw6FVD4uYAZh1eLCep5+xKuQlKrLusdYy3S5JCi6LOGFWWZaJj5JMTDvwWOlJrPuzHhzzKrqXiYtQBkaacoQPBfogVa1WY6EbwHA4jKTH4Hs496g1L4voKyjfOE8frkyHrt33OKWBJaPZDVL/r4mKarWaSd0vkv0g5a59DofD4XA4HA6Hw5ETpYtmVnU4HA6Hw+FwOByOiw63SDkcDofD4XA4HA5HTvhByuFwOBwOh8PhcDhywg9SDofD4XA4HA6Hw5ETfpByOBwOh8PhcDgcjpzwg5TD4XA4HA6Hw+Fw5IQfpBwOh8PhcDgcDocjJ/wg5XA4HA6Hw+FwOBw5UU378u233/7IJJl68803M6VX/sM//MOxiMje3l7Iqoysx5ubmyIicvXqVbl+/bqIiFy5ckVERC5duiS1Wk1EphmgkWW50+lEMi6LTLMtj8djMwN0UjZo/jxrnd56663QT2nPtcqhs1kPBgMRmWS8xncAZ4TWGaA5ozx+xxnO8fc8dbLqkoSsGdSz/i5L1u6sdVrGfFpWHri33nrr3Oq0LGTpp7RxB2TN5J6lbxbJCr+scVfkmMpbv0XkwyJtOQ+ytlPWufT1r399LGLLasjXUqkUZG6lUglXLYexVlUqlch9fE8SsBZAjg+Hw3DF33/6p3+aqU5/9md/FlubrKtVRl1evi7a17w2AV//+tcvlMzjss1b36zz6c033wwvyyu3suxlikSWOqGP5pFl85Z7Xnl/nvuHZSF
rnf76r/96LDLZQ2M/jiv20iJTudBsNkVEpNVqSaPRiHyHth2Px0F26b2o/jsL8Ny0OrlFyuFwOBwOh8PhcDhyItUitSpkPSEWoeGAhanf70u32xWR6QkYVqXBYBC0ckC5XJZLly6JyFTbx1pCXTa2TOn6lUql8Jn+HVuwigBr/aznohywRPEVbYB6wgrF2kLWhOo6sVVu3nKnaRJm1cn6PIv2bNY750WRmn7rWavWyC8TRbZ7USjSEpUXy7I8LvsdRWjV877rIoyVecAWGNRBW2qq1Wrss1qtFuSwZa3idUpDW53Ozs5i3gm49vv98HdWsGbY0g6jXJbVSa8x3D7aApe2FlvgdXEVc2senMc4ttY+/izJS0UkXl6rLyyvmGVinv6dV45Ye5ainv1xxfr6uohMZBjaRMukbrcb2o3lFO5bW1sTkehe1NpX43dZ+sn6bRpyH6TOU+gUMQhv3LghIpMGxWEHB6iTkxMRmXQcvmPBAaGxtbUlItOOW1tbSyxTv9/PdKBI2sjngWVu56t26RiNRmFgYoHE4bLf74dyoJ7szoe/cagUsc2oRdRJ/5b7RLt/8P3WQpBlg5420c5DELJrZJYxlMXt4rww78b647YAneeh7Lze83Hrw6JhuePpA1K1Wk39zJKHAG8mRKLyH+tdv9+PrYvsts7uNlkAV5zBYBDWGH1QY/kGlMvlUAfrMKnry/9bbZDmzujjcdqO1lqjxw3DUhQD1kHKOvAue93Ku3EGln2g8nEnwT2v0WhE5J9INIQG+1Len0J2oZ1brZaITOSoln+4t1wuh+fOOy4suGufw+FwOBwOh8PhcOREZovURTV/5wVIJETimjGcdk9OToLmTWvRROKWqXq9HjsBs7YhjYACKFpbq8uBUz6/azQahbJ1Oh0REWm32yIyqXeSa0Wj0QiaRn6u1VZ43yIWKa05gHZhNBoF7QJrZ7VGQwdP47n8zCTL1DxugkWBLXzaqmnVxXKP0fecFz7K8iPJPYUxy2p43u0/C6t0cboolqm87192+0CmJlmdRKJufJb7Hq48FiE7tPdBv98P1ibW+OJv/d08rn27u7uJ72IrmF47LMsXW6jYrZHbolarhc/Y1SfNFXCVyDOGVlk2eJgwLIuUtabq9TXNgpXmIVOpVBJd/4poi/OwTGVx83NMZB9c9DSJxNnZWZBdx8fHIjKRIZAn7O4nMnEXxDOYII3v4b+LWPvcIuVwOBwOh8PhcDgcOZHJInWedLhFY3t7W0SiAWlaC9btdkO8FJ96tWYC1+3t7aDRSQumnEVAITKfpSMtNkpTQ+r6whKFkz4sUqPRKEI1KRKlnkSQIE78FkFHlrieJHBbJ/m3j0ajGO08azbQJ2x51JadWe2eJV6qaGjqTiuWzbKQWlpaaGKA855/H2XMijvR/WYRvVy02CjGPDFri2CVlqk0GZkFs+hzi2gPyFe2OmnrSlIMEHsZ8HUwGMRinmAR6nQ6Qf7zFX9r2WrJ+Fm4detW+K1+P9YatoLhM47HYvp1PAvQGme25nHb6c8sa95FQ9aY6iLA4yvJasdW9yxrE39npUTBfWlrMO9fivbWSZrHaXHUy4iXOg9cFK8JyATet2G/CasS72EgE46OjmKWcz3W+BnWPhiYJduz4GJKEIfD4XA4HA6Hw+G4wMhkkZrnpKrjNtKQxvZWNGCl2N3djWnr2VKjmfwslhCgXC4HS5fla6yRxuS3iL8ma220NonZSphWEpaoo6MjEZlqJsvlctCQ4rqxsSEik9gwfMYaJq19YQ39vHViukpLq4jyg8K+0+mE+iGGjWMPoMXQ2otZWqdVaZZYI8ZaZa19YcYrjl3jMnLs3nnFBHyUkTSXKpWKaQFAn2BMsbY8KYbS0oBeNO2lhYtsWWOksZcm9S+DZZkVa1nk2sU0vlYcFMqIcrJ81ex7bP3R8a+c+FJ/x7EHOrbXinmZBbDksgVCl7HT6Zhlw2dcNvxOzzUus2Wl0pY9XkOYeXbZSJo35z1PMH5KpVIsNprbzNLs67gpiy5fx5wPh8OY9VQkbqXmFCxFr11WHXiea1hJovO+67z7+bzfr4H9db1eD/IP+0x4PbVarTAGUf5+vy8HBwciEo+5ZMu53gNa/cbeJEDedlpKHike9DrIUyTOE593kC3iEoJ3tlqtkFOKCRNEoiQMENDHx8cx4W1ROeNAhU1tEiwCCv2seZBEPCAyrTsfEjEYcUUd19fXw6EQ+bMuX74sIiI7Ozth8eHFzSKDEJlvAeYDmu5vlGttbS2UA+V+9uxZOFyhjUEwsra2FsYjFmw+kGicx4GKNxy88cBmAhsftPt4PA5toBcAa9N4kWhXdb9yoPx5knwAehPOskz3EW9AMd7T8q5ZLoEXbZETsQ8iSUiTi+cFqw+TSHQsYhteq9Lcay035rx1t+jPrUMT/uZNKrvriUQPTdis6GuSG5/lIoNyaeXjLHAAua4T2ord/lCe09PTIMdx5fKz4ozLz5soXofQVtr9mQ+t5wmLDGoR1/i8gCKVD5aQX9jLWIQAjUbD3OOJRNvdOuDrA3u/348dypbZBjw/ee2xcqvhniQX0axz/aIcqCycBwkQ5na5XA5tijGGQ9DGxkb4jJUeGCN7e3siIvL06VMRsQ0V2Jc3m81EanSRuMzL2k/u2udwOBwOh8PhcDgcObEUVQy7cumAUcvCYGV0z3Iqnkc7Da1WrVYLQW3Xrl2LlIPLi/K///77QWtjaez0KTfNMmVp3LMk7U0Ca/STXIjOzs6CdgjavMPDQ9nf3w9/i0zbYHNzUzY3N0VEguXu6tWroW6W5khbovikv4gpXFu28KxWqxWsZejLs7Mz+fnPfy4iEuoG7exLL70U7odmiZMw6+dzOazyFalVsp7FGnCMQ+3mcnZ2FqP6hCZxEZfKVUJbCFj7j/LPck0sWpvG1gntXiUyHePQiB8dHYW+QVkx1jY3N2PZ19kNRlu6s5RtFWCLZpIVRyROBsBpCZaNtPHN6wlr2bXGXbt9icS9JtiZhicuAAAgAElEQVTdm99tuSctivF4HOa75earrTedTifIdMt6g79xD8sQa33WFmK0S7PZzO0Gh3eLTF11tFWjWq2axE9sneK6HR4ehrUYV3bxRv3YSqWtpWxV5HVq1UAbM8EIypaXan4RPHv2TETsPQ3KVa/XQx/yVbtiwY2q0WiEdVmPY4sOnxOvWq6AeWRK2npg7Ze4ztryzGstxj/qBRlSr9dzyeXzstQvahEr2iMEc3U8Hsf2L2jjy5cvhzGm211k2k9PnjwRkYllSq9JGDs7OztB/vD41jKAx1qWtnKLlMPhcDgcDofD4XDkRKpFKu+plbVmmlYbmqnhcBhO9dBgQHPLMS/LCmZGLFC1Wo2dfGGZKpfLMQ3KcDiUu3fvishUCwbLh+UXz5YpbZVKo89d1IpgxUaJRAkL0Cf7+/uxgD2c/Dc2NoIlCgHDHGsETQKeNRwOI9oj1EVk0hZ5NdQWSYbWnpbL5VDel156SUSmfrUiIv/zP/8TuZ6ensprr70mItNEkWwNxZhNSyQ467
NlgP22+bNZ5Tnv+JQ0sGYLcx4W0F6vF6HhF5lqoHQ847KQRtxydnYWtKmw5D558iTMIcg1WKU3NzdDfCHKz/IR49pKslx0H+ahVOZ7tXaaLRNWPM8qYgmTns9tx9p0kYns0jS7TDuO3+rYjna7HdpAxygxrD7MCstKorXiTMzAViesSfiM111Ncc5yVGvueV1kywL+h7zNip/+9KciMpkLkM2YF2yxxZzB86vVamwNZop0tk7pK9Y0tsTpmFi2Aq+CQCgJ3Od6n7DKeBq2DiXRSltWg7W1tQgJlci0f7e2toJMtyited6JRGOwrPipeSx0sywoWj6USqVYzDHPJTwLbYQxrJ9xXsgbo2lZIFcJi3xE7wvW1tbCXpTHk25v1P3hw4chXsriNdjZ2QnPEInGMGtk3bcW4tqnFzUOMtQmM3apwwTkCaU7s+hByS5selOAct24cSOWIZ4XNxyo8Cz8b6FUKgUBw4IyaSPAAZdZwRs+3V686LNLn8jkIMWuFyJTt4vd3d2QA+TmzZsiMnXtE4kL2V6vF2OUY8adeTfAHOCpF1RcRaYT7LXXXgvlRL/+y7/8i4iIfPe73w1C8Ytf/KKIiDz33HMiMhH6aDtsVNL6YVkMQjweWMhjbAI8lnhRE4mav/GMZW3M5wW3LeY/Duynp6fy+PFjEZkuYliwln2QsgKQdd/0+/0wl+A+ur+/H+aCPkjdunUrLASYGwiQPTk5ibkhiEiMHaqI/pu1IdPfMxsc5hoH6zOZBu5fFbIQdDBpA8t4yGOMKfQXu+egnyALIDP5O97cWQQBeTcmOAxZ5BHs3oa/2a1Ns5fimuS+JxKVrUweoNmzcGV376z43ve+F56B9sZGBtfd3d2gaOBDFrN24f0oP9pdtw+7/eFAxZ9pN8dOpxNz2VwF0BdwqXv8+HGYT1iTUF9W6iwLLF8xPrQLKbuJsvIH49xi90W/MlEVvrP2f1p5xYr1eTf6SW5+PP4x5jHm+G/ImtPT0zCOIBcssiqUeZUHk1myXbcr+g9yQyTucpv0nCLd6PEMiyCCD1QYI1Dmv/TSSzECCsj68XgsDx48EJGpDLDIy4C1tbXEfYWTTTgcDofD4XA4HA7HklCIRUq7kzWbzXBy1FqojY2NcNLUWt3j4+Og/UjK2bQooFFJy5fQarWCFcYiPUDZ4Np3eHgYs0pZwfFoi3q9nkhxylrgrGBttUUtKzLRTKLu7PoAzQS0L+inW7duyQsvvCAiU4sUyn96ehrKyG4j2hyv22Je6DxJ6IejoyP52c9+JiLTPtnc3JTPfe5zIhJ1RRQR+cY3viH/9V//JSJTDfuXvvSlUEetbe10OjHN97JpxNkdiQP9oZFB/7CGSRMAWGPvopFOcHA++ufOnTsiMpED3//+90VkOlaZijet3YvSlvHv9RjguQSNXqfTCeMTffTiiy+KiMgnP/nJYAWBBhr14qB6tAmTHpyHuwUHhGsrNstPaJRhRbAsO8vSpFvU4xa0tnx9fT30BbTkOteIyFQbDw20iMSsIN1uN9UdMu+cQ7B0u9023fdQHotQQpPQaCuUSFzLz2QAbH2CnGfXO1xZY58F7777rohEXbnwLt4bwNUa3gS7u7vhb7ZiiEz6EHMNn6GMu7u7sZxUR0dHYfxi3jG1Ou5bFiw5rKmXHz16FL7TLqe8X7CeWQSef/55EYkSPmB8sQcLXKZgUT84OIjIQJGpvKxUKkG2633g7u5u6HPMw0uXLoV+1G5/RXgiWBZ3lBUyd2trK+JJgM9EJm0OV1WWCyifltV5CSiWhVKpFAtjwZ773r17Yfzfvn1bRKLka8veN7C3EWQW5ijLeLQt5v2NGzdCefVYYc+s+/fvR54pEt9zX758OYxTK09YFiIht0g5HA6Hw+FwOBwOR06kWqSynqZ1AOKlS5fCaf7ll18Wkelpd2NjI2g4YE34v//7PxGJJmlbVqAl+0Mn0bxWq9Vwcoemhn1DOTmiyMQypeOlLCsRsLm5GUs+xqdkjv3JC11G9vWHFoX953ECh9YFcSovvvhisEhpYgameobWqt1uR2hCRaKn+kX8mzkQWmSqVRkOh0Ez9oMf/EBEJppYaC3eeOMNEZFIoOLf/d3fiYgEiwc0gm+88UYYs9wnuk66bPMijVbdGpeoE8rG2kpAx7NYVkKLuGKV4HGO8mIuwILT6/VCvXSMFAf3Lhu6nax4QGirarVa0Kx+4hOfEJGJJUpE5JVXXglzh+n2RSbaXsgO9DtncgdWodnUmrp+vx8saLC+v//++yIyme/QLkO2o967u7thfGbRJM9TNyuhsbaCWbKj0WjEguNRD7ZIae16p9OJ0aWnpZuYRzZAe2rJao7xgexlK5SVJBhl1OQRHHMECwDHjSXFkG1sbMRiNWcBmm+mr9br7fr6eigHWyxgkYKGHNcrV65ErBhcxnq9HpON6+vrwcqAtcyy8K0CqDvGIPZHb7zxhnzwwQciMl2zV5ko2EoBo1PYcIwQLFOPHj0KllTEteK7w8PDWAwb7mm1WqHP0Tc7OzsxKzFTq6fF7uQBexhoUhkuD8YK9gWXLl0KYwqWVp6LlhUtb8LeIsGxkJi32NNhH/HNb35TfvSjH0V+h/rjnmUCbXV2dhbxhBCZyg5ehzluHJ5SWH84jYVud8RMHR4exmQ17+0ty1SW/ZJbpBwOh8PhcDgcDocjJwpVeeDk1uv1gqUAp0tYnwaDQcxfGdqKVSTG4zJCu6JjvJjRT5/kReLJ5c7OzoKmFnWDBpd9Z3ECvnnzZtC0cPJUfua8QNlYey4yaWNoh/BZuVwOJ3DEE6Gen/jEJ8JnKCtrX3QixHa7Hes/rndejYwVa4BnoM22t7dDe3344YciIvLOO++EcgK//uu/LiIif/RHfxQ0mH/7t38rIlMNe7lcDs8Ca1K9Xo9Z9hax5mRpA2Y4hAbO0qiy1kjTtMKK8OzZs9BPHDO3SlpdwHqnTmaKuXbp0qXwt9ZSJZW56DpZ5dQJp0WmWv5msxms19CQYS7t7OzEYqqgmX306FGQfxZr0nnGu/E7UWeOa8WcgGYZ2mS2uM9KpCwyX7why0s9J5mGXX9WrVZjCXn5qrXwHH+o68IWKc1cOI/Me/jwoYhM2lizy1ksc2yF0vFPFt27tuoyJTk+Y8pqyH22gudNyIt5fHJyEmNbZcYzHQOxvr4eyqTjp65duxazUuGenZ2dGOU2s8HhM04ynJfSfRHoZN2/8iu/IiIir776qrzzzjsiIvKd73xHRKZyfJG4u6xA+zSbzdBGVuJky0oFywH2fND+P3z4MFir8B2z3uFZGNuj0SiS4FxEImMxb3xeEtiarVPCsBWNWZxFJnMDbYH5BQ+kbrcbS1lQrVaXzkKdFSgv6sLzBftw9BHkC1voGUXWgeWJ3gOjHM+ePTMZ/dC2WHfh0cKWQFyRAuf+/ftBtmJsWt5SPOazeFXkPkhZE5nzR4lMDkgwFz569EhEJJitDw4OQsHhcsX02kzIIFJ8sDUHeaIu2rWGF0gI71arFTZGFnU5/kY9IQjef/99kwQAm3UIB+3qN
y80lTIvXtodqdlsxszXGIzPPfdczI0KB5T9/f0gPFFPFiSaLMGiZc+DpMNLo9EI/YP2v3v3rvznf/6niERdGEVEfvu3f1v+4A/+QESmQvIf//EfRWRyEIO5Hs+6cuVKjHo9LX/NImAzM8YLu6ZgkwpBiA1Eq9UK7Q6XCiAtU/x5wSIhwMKFjcPW1lbMhZGpTVcBa27zONTB7s1mM8gxTXLS6/XCwQkHd8iJx48fh+cz1W8ScYhF35oVWQ8zeHez2YykPMBnIpOxhrnBMhK/XzZJBrtXa/nAB3RWdolM5KLe8GBulEqliKJNJEreoPPjMZLy9+UB5m+73Y4QWnB5+NDEc4Ppy0WihA58SBKJ5vnRZA3r6+uxnD/sqpRXjmOTc3x8HNYKTVzCeZ6YrpjHoa4T9g7a/e/q1avhM3YT01Tb7Da3LBc6a55qWf3f//3fIjJpA2zsdD6dvLmB5gFkb6fTCWMOYwPjjd3xOMUGgPmEft3f3w+bc+wDcX3y5El4J5NVJCmSNYHVImBFC8qMMjx8+DCmeMAe7fr16/LZz35WROLul48ePYqtrSyrV9mXGpz3EPMMdfvc5z4XlNDYCwLLItRiWK6ker3tdrthrOiwGpFpO0LWPP/886ZxBFcc9HnfoV0BOR9ploOUu/Y5HA6Hw+FwOBwOR06kqmKyaj5Z2ycyOUFC+6Tpfnu9Xjjh4+TPJuQsriCLgDXb0H7gZAqw2ZAtUyg3gqr55Kw1DzjlHx4eyr1790Qk6iaC+9iyIDLR/s3rPsZuLlrbOhgMwnesaYRlDKd5WDx2dnZCW2mLwdOnT4NFSmeM5zYoyiIF6HY5OzuL0GHiXaAo/d///d9I+Y+OjuQ3f/M3RUTkd3/3d0Vk2iff+ta3gmsN+q5UKiUmgh2NRkt3s2B3JNQT44Rpi3XyUGgJ2bWA++Q8ySYApm1H/4B05vLly6Hc0J5Dk7xK14gk62O1Wo1ZzDY2NkJZMVaYphp1gysIxlq73Y64EYhM+k/T/i67z6xExLVaLWjyoeUHRX273Y4RA7GL3LLleBrYRUS7hvZ6vSCz0O5sEWRyCZFo0lvtksbac23Nm0c2MHkEJzPnsnLySP4MaxMsBmyFgnZVX5nOnNfgJMppXjOzAmQK7XY7zAdtmTo4OAifMckG+gDtgvWHqdQ1Wcbu7m6wVvFVJ/zF79bW1gojMcgCpjsXEfmP//gPEZnID3iFJLlULROwmNdqtUjyapRNZDKmdDLlnZ2dGDEE2v327dthrmiL49OnT4MFDta5Z8+ehTGB+yFj2IMoC7K4ejPJAOQDxhgDfXbjxo0wVtBXGK+lUimUnd179RpyHi7a4/E4zCGEnEA+XLlyJaSAYXfXVYHlJls3RaIhNJC9bJlKIn+7detW2Ne+/vrrsfegD7EGd7vdMC61N5VIeoJiwC1SDofD4XA4HA6Hw5EThSbkZTpTnGp1YttarRbRmPPv2d99Wad17TMpIqZlSlsFyuVy8CG1LFNJ5X306FHQWiAuolqtxp4PLc7a2tpCPts6Xs0KSEb5L126FCw5uEJzV6/XgzYI1idorfb29iKxUSKTvrP8UVHHvJrpNI2StrqhvKgHyvHee++JyNTS8fd///dBa/SVr3xFREQ+//nPi8ikfRBbBU0Zx/NBO7cKTbuu82g0ipGH4FqpVII2zUrGqefTRbBGiUQtUqgLCGmePn0a+hbaTtx7HhYpgOWctrq2Wq0w3qFNBZ11p9MJY5EprvF7jC3MSw7q1/VdlIwmC9gSCjkOiwXKxVpC1AXjjrWFyy6j/lskqk20SHeYcldkWv5qtRrxy8f9ItHErainZZFaJBExW5+gBbXo27UnB8dB6XgoJo/gOCiRyTjT1lBuO+1pMhgMchNCQXM/GAwiaRlEohTkmoDq4OAgljyXrRT4W1urHj58aNLbw4KC9Q3/b29vh3b5/d///Vx1mwfacok2Pjw8DH2nE6xbvy8aaEfLu4ZjdrWVant727RSiURJgzDn8N3GxkawcnPiX/Q5xgP3/TxpYdgqxJ/hqtuz3++HuC69L3z27FnoI6xPPNY0SQK/a9mYZYFDebG3QfsOh8NgvcFYzEsoUwSs2HDLIwRj4OnTpzGyKiaFYt4FEZFPf/rT4R4+i4hM2gRyDePNkodpcIuUw+FwOBwOh8PhcOREIQl5w8PIxx+aTGi/cG00GuGEqTVTnU4notUUKd4yZVl7tOat0+nEfGW5LbRl6oUXXoixQ/H74A+N0+79+/cjycMYnMwyK/jknnRKL5fLMcaiK1euxOhjoeUcDodBawFLFK7Pnj2LUdaXSqWYJk1r7eeBpVECmHUHqNfrwbqGNoZf8N7envzTP/2TiEx9m8HC8/zzz4dn/eQnPxGRiVYUGhAdqzCPlS0N3IcWhT3GDvzJcU+z2Qzl1lrdTqeTmKjzvMEWKcwZjK+jo6MgBzSL5aw2X6YGkDW0mlWPLYOw4HKMFGQAxhPHE+kYg0ajEYu1WbalnsHv1Ax3PLfxt7bwsCV0WeB2T7IccpmYrhn9iM+09Ye/g3a50+kErT0z6Gnt/SLjD5aRUqkU2hRlY8ugtjCxRUp/x0l0daJatqxyn6OvLSt4XqsA1khmRNReE91uN6wnbKVi6xRfj4+PI0xvIlErPGQ7vjs4OAiyRbcTU8AXDWss8NrL5RkMBrGUAasEewVp5l9O+8BJXkWiez2OpRKZWGm0hZStoXrvU6lUQr/gO8h/ZrLMiyRrDa9BwHg8jsVLcZwXLGroK/YM0TGXlnw4L6AcKC+nE0nzgFpl+bX1PQ0c14S4Y15/UE/sBREb+alPfSrUiS1g2FfpNB8Whb2FpaXORoEgeHlDrwOAWXCvyrWPO0tvXIfDYZi02BRZG2c+UIEaXZefNxwwGZ+cnAQKRiuwF5M1K3gR1IF3HDiuA4t3d3eDqwMWWzzr5OQkLGrYBHIuCDZfo576ILWIa59+NpeNoTdK4/E4bBRg1kUb379/P0w+0M5ic3T79u2wEIB4o1arhe91LqFlUubqzWun04kFsqJctVotNvl5A68p7xehzy4aul8x3nq9Xqi7djPgg/Uq3Sv5fSxY8fd4PA5tjTEGnJ6ehv7CnMCGoVarhc2GzqouYrtiLrv/rCBfTQvOG2OdX2sV44v7IMl1h4lVeC7ptYndOPRawC7Slrt00rvnAZRavDm1yGWswxJkl0UeAXmoFVxMTsSHJu0+zCk08h6k8PxqtRpLacLzH+9nN2XMGcgzzvUDeYhDE1Opo4xMV4/nYm6y66aer8uEJmZBP7H79rJSbKQB440PUmgzvmo38uPj45g8ZiUA6odxiXHKB3zcU6/XzTGKcmWhoU6Dnpucv4zXc52HDuvq2dlZ+FuTH1lkW+dxeEpTPIvYYysLMceyYK3l1oHKalOMQU6ZIBJVBuEeyNaNjY2Q4oeNAJiP2O/zGpFlr+eufQ6Hw+FwOBwOh8ORE4Wq1VmTqbV+luuSxiKkBHnvL5fLiRoOTtoIrZgF1OPy5ctB4wJrhqWdxcl2f38/aDZAwbhI0kPLIgWwS5pFGYty
4z7WPuKkz+QLIhOtLlN8ovxWIt4ikaY5wWdM8w4tA9wKKpVK0IJBgwm3v36/H7QWqMf29naoA7Sz3NbL0jhxXfRnKAc0sZVKJWaV4ySeeMYqXcOyQrcf1+OiuERYsLRnrPXWbc7WW7YUiEzmjSZysNygitJSp8leDR7jmurceuYqNelZZAuXlb0gtJWKLSRJlLq8pi3LugtihrW1tZgmH3KLKcvZ+gTZrskjLOsmW9ct6xOvAfq7vIlR2XKO8a3d2pn6mD0jLCsVnqmt73yFbMe72+12zMWZ+3lel7FZSKO8Zm04Pi/SupkXGD/j8TiME3zGrpiWlUpbADn1CvoCbnKWtQrvaTabpvupSHEpVBg839kyhb+1O3y32w33o85p3gPnQXWehIu8nopEk//qsrILMkPvWTHWPvjgg9jaC1lw9erVMO5ASDEYDGJWMOyveF1Pg1ukHA6Hw+FwOBwOhyMnMluk8p6otXbS8vU/b1/SNJ9bHWjZbrdNH1tcEcwGjRq0i+xHj/fdvXs3Yt0RmVp9OJllVqT5vbJGB9og9lPG95r+sd/vBy2STr47HA5j/tC1Ws2MjeLrPHWyfjvLMqVj8FDHVqsV2haaL2gq9vf3wxiFxa5SqcSIP1ijuYxxy3Vi/3BNrWpp0S0a0POwFsyLi1DGrO+2gvT1b9kKiDmhfetZG8/WrSTa+mW1TdJzz1NLnoYsaQj4O4sII+0ZSZYp/XeRgIa01WrF4kc4Vkpbn+r1emJ6AKYs10naOR7KioPSCYj7/X6M3GcWOGgbZWRSKpGotYrvQf1wxbo1HA5jMRCcvFcTVzBRiLayDYfDpRPxWHML77xo80lEYiQyAMsly0plxVZpEi7+nfZa4tQ42nrJ+4sioeU4W76sfYa2aLL3Tdpau8o4pCLftcrxqfd8Fs8AEwNp+c0pLpBmRHsfdLvdGGHI1atXY1YnyKHT09NM8n5pZBN6MKZtiPM+syhkYZXjQOokNz82h+vA8Rs3bsSCSMvlcsgppRmGnj59mpvHP23icrZo7T5RrVbDb/B+XphwgMKCxIQOmrWsWq3GPiuiv9IOLEmmc+0+wQdZtC36CW3S7/dDG+D+ZrMZ2fDqci0Luj9LpVIs0N0SllkOIhf5IPVRhuWaw/2XxmjJhBUi0UPZReuvLOVZJcNT1ndZbZtlgbT6YVl9AtfiRqMRZLQmJeBDEzM7andSVgQm5Znrdrsx175erxdzUWW3uLx5pJj1VAf28zXL4Qr15fv1IWswGJiHQ8h2vc51u93ch8MicVHmk7V3AKzcm/w7rWzgsacZGi3loFUOvXaPRqOlHKQAXk/1ms8ywFKg8f9Z35PnN/PivPfXWZDm/prWJ7yv1d8xuyjYOvlAhbxZHPIBQ4iWb0+fPs3k+uuufQ6Hw+FwOBwOh8ORE5ktUnnMhUVYn1aJLK50swgotHsV3MPW1tbCCZjNwjg9wwQJdwS2BOUFk3VAY8dWMA5wx2ecI0EkSq8NS5SmsGdTK1uhirREMbK4F6WNT253TYQBN5lKpRKjOO/1ejGXmWVrlNKINJI+S2ufizbvzpNqtSjMo+lLkjEWjXdeK2PeshT5PAt5CC3mxbzPYsthnmesYrxizWg0GjHXJrZaalc9/kx/xxamrNYn/aw099VZYHdjTRXNlg62NuGqLVL8f5IbObuAQbYzdb3VFjqVR1FICgW4iGDyKD0/2HKr13he9wFYC9jNXu+PmGadZVySvJuHiGweWPThWazf87j6X7S1+TxhtR+PQz0mLZd4HpMs/0SmZCfj8TRPGNLjbG5uhjEMyzZ7XyHVQhrcIuVwOBwOh8PhcDgcOZE7RurjfIqeZZnSmjr4TlpBzdC8rK+vB/92nIDZNxjaMyS97XQ6C2nIkmJ6KpVKjJ7comzmxJU62Jifpf2mmba9iDGSheo8yTKVpPljjaoua61Wi7QL3qODghex9iyikUyKv0kLaP04zFVrHJxXvfLG42T97Xloqj8OlsFFsYy6L9Ku2vokEieQEbFpzLXFhf+3KKtxj17TLKKTReZemuWcy6/jldlKZVmrNCEC36vX8XK5HEtKzLFnq4qRyisDziPQP+0zyzIgEt838VqpPWP4f4tQJ4mwYZVyat45/HFad88DSe1nxaaxV5S+v1KphD203tceHh7G4vC63W6IScWzsGff2NjINA7cIuVwOBwOh8PhcDgcObE01r5FkXYKXPaJf5ZlSlMqcsyUZnMZDocxKtsrV65ENIAiU03Z3t5esATlBcdiaIpfS1M3Go1iiWbZt1Rr6vj32iK1jIR5IukWpiS/5CzWLP2/RadpMbGdF5Lq9FGLRwTS+nXW7z4OWMS6taxyLPNd591vHwVtMceP6PnOMSaaCY2Z+bQGluODdPwUW58smneA5WLe9uPYLj2+0t41HA7NeBwR21rFVivNHmtRWvP78rLkLgMXZVzOYny1ysnxVYysdbK8aM7TIqXL5Zap1SKNyc8C5jmszBy3x/IEV8T+A8PhMLA463Q3zWYzU3qEQg5SqxrkqxyYaYcp3bCj0SiRIpGDKnGQajQasru7KyJxusVyuRxyTC1S7qRFCGXCu/WBjhdb3a9WrolFFtmsWMTNL+vvsrqvnLeQzPLej4oAz1POVdUpjYZ1Fi7KoXteLMP98KMyFi8CeA3RBxt28dMHI3bt0656TFme5dDEsNaQeQPqWXGCq6aX1mXS5eNNkV7X+GoRIVlpB3QZHbOR1QXQ+jvpnjRchL5Z5EB1Ecr/UUXa+mu1LadX0IoTKJpYecTylkNxRKL06llo9921z+FwOBwOh8PhcDhyovRR16I6HA6Hw+FwOBwOx6rhFimHw+FwOBwOh8PhyAk/SDkcDofD4XA4HA5HTvhByuFwOBwOh8PhcDhywg9SDofD4XA4HA6Hw5ETfpByOBwOh8PhcDgcjpzwg5TD4XA4HA6Hw+Fw5IQfpBwOh8PhcDgcDocjJ/wg5XA4HA6Hw+FwOBw5UU378u233/7IZOt98803S1nue+utt2J10kmJy+WylMuTM2a1Wg1X/F0qTV41HA5FRKTb7Uq73Q5/i4j0+/3wvEqlIiIijUZDRETq9brUarXwLn7meDwO5fn6178+d50APHcW8iRmzvpMC1n76S//8i/HIpP2wfvQVnzFd2llOjs7E5FJHfG3deX7cNXtYrXTW2+9tXA/zcIibT4PsvbTm2++ObNOpVIpVn7rM0Zau8+bRDxLP/3VX/3VGO/AeBiNRiIyne/D4VAGg4GISLj2+33p9XoiMpUB+K7X6wV5gBgKEJAAACAASURBVCu+Gw6H4blcr6QxX61Wg+z453/+50x9NK8cn7edVyEfstSp6GTz89brPOvE8wxjyEKaHLSe9fbbby917Fmw6l6kXFxkD3HRgHbJO/aytnHaOMx6f57n8r1Z6vRx3Lv+zd/8zVgkuhfFWsD/68+q1Wpsn8lAm+t1bjAYxNY5Xvt4PcTvITu+9rWvLSwfeG+ny409Nd9n7RG0LMP/+v4sSNs/uEXK4XA4HA6Hw+FwOHIi1SL1cUcWrQp
OvrVaLZzwAT7tsnVKZHqCx2/5ys+3NPWLIM/vs57IV20N4XeWSqXQB7hCu1KpVEwrlQZrI6B10daG0WhkfsfWqfPCebR/XmTVOLJmLElDfnZ2FtMyrar9rbGSpqmDFarX68UsUrA+9Xq98Bk/A//j+QzLEoX/Wav2/9j7siY5rvPKU11r7w00FgIgCZDiIlGLJZmyZ+zRhMPhUMREzA/yi205/OLwiyP8E/wT/ObwMtLMeBuvkihaFEkQBEmA2BqN3rvWeag4t05++WV2ZlVWNSjd85LdVVmZd/3uvd/5ljJ18vrD0+bZ77K+P+t9We/8oiKvHef9zrPusWXS8ZOljVYNstUue886i0WeF85T9n5RcR5t5u1pYt9VB4914rXVaoWr/U4ZKbvmKgPNua/3WCZH154i1kBlYJ+nz+Xer8gYU7lWxMpInzXtfvyX9iB11iGKHaf0KAcmf8vN0GAwCJsmu4mq1+vhWexU3UR6A7ss5mG+V+a584Cao9i28g63etjKKnfe5lhNrLzDFa95E7JqfBE2oUUOOjretZ/yDlL2UMu2GA6Hpc1KykAP0d4YAcbzXg9QwHje24MUr55pn441u1jpGNYDFO8pO+60j/IUOHkmEvb+omX4RT1UzRt57ZtncufNM9uvOq7teFQlhjVlV1lcJbyDoCLvsF/kcHvW88uirKnbLzpsH+jVrptElAXloQcpPTgBE7eRZrOZ+Ju/swcRot/vp9ZYT5mYp6CuYk+kMswz47Of6Tro7ens/kHHqDdO1VTQK8NZiKZ9ERERERERERERERERJfFLy0gpeFpV5kNZD2B84qdGwDqHd7tdHB8fA0AIOkFNn1KthMeyzFujVbUZ3zxZAX2+vscLAEKNjGprsmhgj8ZWZ0oNHMB7LN2tmo559dkXSVvnUf7WlMibT56WTNkgfsa+0PHgsbZVmchqGewYUVM9j33i35QFeo9lpKxpqZZ5OByG8ZzlNDtNnc4KZpEXlCCPeZyGndIyPC94XspTlI3ymCLOL51b7H8dv8B4nNr5tbS0FGSqtcrIY5GnQRHms+p3zev5RZ571jx53ueHB4+F0LFn2QLrMhFRHGqyZ5kovXqMlF2T1fqB65tdkzzzOe1rz3yurEVVEfNAXSO1jDboRZ65vK5zun8Exm0wq3yIjFRERERERERERERERERJLIyRKqK1XLQGJqtMemrVk79lj1TTd3h4CAA4ODgAkGStlpeXU8+3gRP0VO85n1dVN/ud1VSonWzWMxZhD662uVnacLV3VadLG/5TtahZjFS3201pbPUz6yeTFSRgVng2vGVDfy/SKd5jO1TbBSS13Ko5s4wU27bb7aZYYi/EqiLL57BsG6idtRfiHEgGj1AWikyU5yPFMZXlc6dlV5twyzTkBenIAp/l+WMR6suZx0Do77K0j8+L/9RZz8xzXD4Pn5eyMlsZeqtl1TnFcci1aX9/H8B4PLMPOT9XVlbCHO10OonvgOl8ePPqsmicR8AQYpF+XIuCWmbYkNy1Wi3hR67w/O1+mf3MiiAv2MRZjJSFzmPbN3kBl9RSw/NNmlY+eHscLYdln3RNtSlFtD52H99qtdw9QtY+uOicjIxURERERERERERERERESfzS+UjlaRxVC2zDSrbb7fC9PR0fHR0FLd+zZ88Sz1RGRSMpWW18FZqysn5QWg7LtmnUsvMIk07NhoaRt1p6T9Pi9Z0yU17kKmCs1aDmlr8/OTkJ7UOth2oq5hEJymMrlaXM85Wxny1Cy+nNJ9tPrVYraLdXV1cBAMvLyyk/IPb10dFRIooYMOmvXq+XsuHWfiVU61SmDbzEg16EPusPdXh4mGKiNIKnlRleGFmVCVX6ICpLYe3h9X+P3c3TbmYlt1ZM6z81T9jIdjoObfuct5bci4hm/Q01eqn67QJj9ml3dxfAZG3imK3VasFagvNzY2MDa2trACbyk8+cReNcJaqQa4tgpvK023lMb5XR0OYFXUdZdsoKtcChzzhlpzLsedEYqy6nhy8aA6jyyu5xVBZYWa0WDl44cMsw5fmNq5WO5+87bWoO/duzGlK/TiC5BttyeH6eOu44TnV/mGXlUXRsTn2QOo+wtlU7Y3pBJoBkEAMuMO12OzUI2bkHBwdhkaL5BDtwZWUlvE8ngnW2nGWBmuYABSTN4Gyd8hz2vHavegxwAnsOq3mb0GazmQrn6WX41s0BMG4LzwkxazGsqr72efV6PSUcgbSjuI7FIiZB85qjWo6sMPXtdjtszjY2NgCMD1SsH/uVQnIwGIQFmM/XQ4zXv/ZQPW0/6WJi21wPSHqAsp+pSR+QPEh5G/QspQqQlk31et1VIORBTbPY5iwb33V4eJgaW81mM2yKKMd41fliF8CsBfV52Qyyna2j9mg0SvXdPMx3y8BTHlmznnq9Htqb84aHp0ePHuHp06cAJvOLz9ra2sKFCxcAAFeuXAmfcbzYudftdt08U/OCF/JYr0D+mnSeUHMhzwTTbvAA31QKGMsi73BlsYj5ZWWUHq5ZJ8r60WiEJ0+eAJiMPdZ/eXk5JaOrnGtVtMXzdgjz8md6uTW9snkKSSB5MLLr1/HxcZAnXioP6+4wayAke6DT9Zdl0zLye4LysNPpBIUtxyKvy8vLqUOWlt3uMYsGFYumfRERERERERERERERESVRmpF6XrSKwPwS7SkjpTSgdWbjaX1vby8wUjTxI5M1GAzcsN08DS8yLKsyUbyqRhqYmH+cnp6G8rIu6kQ6b40MtaDARNvCz1h+pZ5Vm+eZw/A5al6pz/Yc/M9yDp5lLmQ9W0132O7qmOtpgcqE3q2637w2UCYKGLMYZKKoAV9fXw/jiX3IOh4dHQWNkHWUPz4+TmlA1cHWhqkvG5hBzflskm2PkcoLNqEmEB6byjp4oduzzFg99u0sKBNORon9xrK2Wq3AXFAG7O7uhnJwLG5uboYrtX5se51TnmbP4jzWEmUKrCOymvadN7L6X013+F2v1wvz49GjRwCABw8eAAAeP34cxiXZxatXrwIAXnrpJbz00ksAgO3tbQDJdY7PVM2w1QLPCxqOn1BHc4J9p+zPeQYM0TXHBoppt9thHtlAHktLSwkzc8DXviuzvehw6fp8DbSlDDYwZjWBJDPNsURZsbW1FeTFebO+0+A8gpboviRrj6IhyNWcz5rLKcPEfSz3gPq/TeVxenqaGWxiGiizadkytQSxFh2NRiNlUaBWL1yn1tfXAUxkn66dynx5qW/0fWchMlIRERERERERERERERElUVi1WdYPwzspAz5jMIudc1mtU55zvGp9vYAF9qSsLA5t0nnSV/tLzznY+upUrdnwtNwE33l0dBTKff/+fQAT2/pmsxns5nmaL+oMb9/jlecsWH8NfYbnOEjNyerqam5YTMuu6dhj/1NT0Wq1MsNVq5anKIoyR3aM1mq1FEOjDuNeEs7zgNXAsq3X1taChujixYsAxlojtre1wwaS/ofAZFweHx+HdlFfDjuHdf6VaQ8dM3YcebbjarttmSt12rW+UTrubL8pc2r99jSwQFFQu1
av11N9w/64fPlyaGOyGffu3QsMx87OTuK7zc3NwGLwGRqkwDK+/X7/3P07+B6boFbXJk/u5JVvXmHbPSZK/wcmY293dxeff/45gIkcf/z4MYDx2KNW9saNGwCAN998EwDw6quvBhnP5x4cHISxzbnHq/onzhsaCIPQABoc06wbWRBvnTsveOsVGWH1FeV3bFv2K61b9vf3w16Dz1QmaN7+QLY+QNJ/zsrjS5cuhe+4bpLlVv8p6ye+KGYqb58KpP1YF+ETXgRe+9urylllVWwqD65RR0dHYWxZRuro6CiMRV0Xs6wrgPKJlr1kvnaNVH9cjp92ux3mDq1cuA5duHAhyAWuc2y7fr8f6kS5dnh4mGLxvCAYeZhL1D7PIZodp9GtuDioiYg9YMxTGFpBpwceG9mjXq+HAcTyU9Dt7u6m8kexw3Xzohtetk+VzrtnTW49QLHcn3zyCYDxpkmf8corr4RFlgNVA1JwQpYdcEWhFGsWzar5vjhhVlZWQttzMnHRWl9fT0SNA5A6hGg9dENjTQIbjcZcnGM1YzfH2Wg0CmZX7Ke9vT0A43pzE8F6qwnmvKGmLHbTp5H62Acs68bGRiqSENu62+2GucUNPDeGFIJ8LmEDV3hlLAJVlnhBJnjN+8w7wOeZ4VindI08adtUs9oXBcul5bAL0+XLl/Hiiy8CGM99APjss89w+/ZtAMBHH30EAGHD/uzZs3DI4oGK8uLixYuJDSKQzB5vF/9FRpcEJv1ic/+dnJyE8lJ2WJPReUIDztiDtJqs6gEKGMsEygU69xOXLl0KB6evfe1rAICvfOUrAMZ9zndynh0cHIS5xs90/JQ9xBeFnR/NZjP0gR7ygHEdKR+uXbsGYCLj2+22G202a22aZuwVXee84BFWPmnwBd04AknTLGvSrc85jwiTNhAJMJH3ly9fBjDeH3Gucf3SdZZ1103sIuvgre+DwSClCGM/6Ia+CKoOyKYHGGuqp/fYw8BgMEgpA9V8zx6keD0+Pk4pE/XdHhGiAbLKwFMoqrKbc4Lr1dbWVjiwc7xxHdrY2Aj320BWu7u7qai/uufyousWUR5F076IiIiIiIiIiIiIiIiSqFS9xJNpp9MJ2iR+RpOQhw8fBu0ZT5cvv/wygPHJ0ppXFQ0/OAu8ENo2BxGQzHMDTFiB/f398JnVPHjsluZQIGbJGVHUZIynbJ7Onz17FspNjd5rr70GAPjOd74THJGpxaA2WrWe8+obLzyndQRUbSL7qd1uB7aJddKrslPAhJlqt9spjYgXxtlzJq4S6vCubcA589577wGY9MXa2lpgEMgosI46ducdzETN0Wy29eXl5VQY7eXl5dCPaq4CjDXP1IaT9Xj48CGAJGvAvmm1WqkgClq+MnVXDZxlnbwcFuqEa5koj0m1ZsyeGZ+mJdDAMPa7otDxZOcS5Wyj0QhBCK5fvw4AeP311/HVr34VAPD+++8DAH76058CAH7+85+Hvrl79y6AifnOpUuXgpaQDKRq3G1bVC3jzzJDZ99xTWLddnZ2AsPxpS99CcBE2+nJ7KqhjKSVRezD4+PjIH/VnI/sFMcQ+/Ab3/gGfvVXfxXAhImiJrfX6wVTQF7v3r0b+lXNh4GkSfosKGI+02g0Uky7stO0pOCzXnjhBQATuQ6kzbEXAS8thQaPsI793ENsbm4GRodQq48q3CBmhb5TrXOsVQjH12g0CvKR9WRfKgOxCLY3C1b29nq9sNZQrrF8L7/8cpAPNkXAWdYpVbhweCwUn6vsmQ3l3e12Qz/Y8XdwcJBp2qdBHrw9l2WOms1m6bnmuVZYZnZ5eTm4BpB1unz5cliv+Bnnfr1eD+XmmqTpiciOUp7s7+8HWWctrYoy1pGRioiIiIiIiIiIiIiIKImZGSnPDnRtbS04tlJLwVPuvXv38B//8R8AJs5h1CZvb29Xmqj2LNiTtWrUrbZEnc9pq62nXGo5WV/VLttQp+ovYFF1ffU91pZ6eXkZr776KoAJE/Xd734XAPDVr3411OnHP/4xgKQ/CDUURTRk02hhNBSmZQaUNbCaoHq9HpgQ+mlQU7G5uZkI36zfra6uht+pba7VOM2bKR2NRilN6snJSRhz1JZ98MEHAJLJVlkXzidN3DcvqI25ZaSURfEYXhtAQ/2i6KfB+vLa6/VSoUzX19dd/zlepwk2oT5S1k5c2Sd+p6H4PT8GLxQ/62DDcGt7WTZbvyuL4XCYSmyomkn2w61btwCMGU6y0mQzvvnNbwIYy4Qf/ehHAIAPP/wQwMQP4vPPP09pPi9evBjGpfU7WqR2XRlfsjj0/7p//35oA+sfmsUCVhksSMeGDU+vfq1kosgcHR4eBnl28+ZNAMC3v/1tAMDbb78d+o4MD2XJp59+ip///OcAJn344MGDVMAknW/z8JHyrDCWlpaCPCPbxHofHx/j008/BTBhOL785S+H351H0B3r/9fv990wzpaF595ArSWsf+FwOEyFhl60L5GF+n2pJRIwsYjY29tLBebSVBeLZqLy6sHvjo6OcOfOHQDAD3/4QwATef69730Pb731FoAJ60YZsre3V6hvZpEXykRpsmZ9nu4fdI3ymChe+bcGmeCzbbJdZRI5Psv6Eyl0rtp1UAOzkHViu1+6dCnIZt7Hdx8eHqaCI1F2PHjwIOEPyvax6VK8fU0eIiMVERERERERERERERFREpWol2y4wk6nE06O1E589tlnAMYaAGouCWpHvehWCs+2etqy6vNsRDY9hWooSS9sIstvk4RqFDmP6fDCaXtlnKV+FlZztLa2Fmx+aUf/rW99K9TjZz/7GQAEBpGa2+FwGLQAyrIRVWoCVRvnRZ6hxks1J7aeqtmg5lajxwFjhoraTY3oZ7URan88r3CtXvhz1sVGE1Ntk9VOeZEIq4ZqbmzocdU4erbcLDeZXfWBICPFKzV/Ok80SZ9lejx/kyLwWCfLhHoJMs8KC+tFBuXV+kG12+1MRkqTDxeFRgi1ob8p0/b29oLvDZmOJ0+ehIhvZKbIDrzxxhv4xje+AWAiH+g/9cknn4S+pazv9/spnxeOaS8p8Txhx4gmdrQR0xYJ9RmjPFMmChjPB/5NXLt2Lfh0kYmiPH/99deDPOP8Igv1k5/8JPxNxrfb7YY2YP9o+8wrah+hVhN8F2U0LViWl5dTLDb/V1lwHvBC6at84xxjuSkzOp1O0LB7Uchs0mvvnYuAfZe2t/qA815l7BWe72qV9SiyV/RSIQyHw4RvDjDZ5124cAFvv/02gAlrT5/ld999N8i6efuNDwaDFJunljPWWkJDnHPvyvGnPlI2obyuF7qnsD7QGmK8LMuoe24b7Zf7Nt1D8/mnp6dhLpF1Yt0eP34cGCjL3j979iwRhRRIrsF2vS3qY12pVFRam4W1G+0rV66EQcgFlVS2bprnZdLn5RDwQlzb0Mynp6epwagbWN7PjlCneisE1SFwkbkgbJ6f1dXVRBZyAHjnnXcAjAXED37wAwCThZcL27Vr14KQsSZLVUH7xj5bM8DbQAC9Xi+0A9tb+0QPTsDkQLW1tRU+4
7hcWVlJmaSpoJjHGPUO+s1mM5SNc4f3HRwcBDNaLrysU6PRmDpTd1HoptSaaqkSQtMfAOP+ojC3h6adnZ2wKHGuqeO7J9yz8i41Go2pTPt0bFkTHQ3Jn2fqqXPDO0AByc07F5BOpxPml35m7y8KlTtZfaTmo1ygPvvss+BwTfOw119/HcBYBnznO98BMAkW9MYbbwAYyxCGTdd8Rux7m+ek0+lMHTa3LDRELzesDKhx8+bNRF4tIB2K2nteVVBzKXtQoLN+t9sNawvn+Ze+9CX8yq/8CgCEwy1N/Or1eghUw4MuD77vv/9+6B++e2VlJTzfpoooO5emgbeH4PzTYAbW5NIzNTtvaJmA8X5BzV+BSb/WarUg4+lET+XF1atXU2ZMnpKPWMRBUpXnVnHGep+enqZSW+g6uogDYNGUMJoLi0oJjjeOv6997WtBLpAo4Aa91Wql3ADKmpWfBa5NCvvOrMASlO0cb3qgsubeeqj03F6sIk/3z9PKcW8cabAWlpt1Go1GqTQQVAY9ePAgHK443/i7wWAQysi5tL6+ngqcoeUqIk+eD4kTERERERERERERERHxBUIljJRlbw4ODoIzKE991EZev349mI9R20dNer1eTyUYm5fWQrWz9qqMkYad9RI4EtZBjto8ZTVUg+QlpeS1bJ3LZiXX/uKJXZM7AsC//du/BdM+tgsDU6ytraWCapxVjrJ1UkdGm5TUmpABSS2YdfrkfV6oTw35yb+VmbIO8vruKrVNee1Tr9fDeCL7xP97vV7QjrF/+H+/3w9mJcoGVDmndM5YlkOZHGrWdbyzbGpKBozpdxuOlGg0Gq4pnO0fb4wUgZrqWYdeTYppmXOPSdTQ/DYAh5pFcIwp+2TNqjRgzbTBJlS75rHwNvzt/v5+0PLRAZvs9JtvvhnC7pMZIbOzvb0dQvFrIl+OActIeZrWWZBn1lOr1UL7UfNPc0XVtrJf1Zxm3tD5wj7gPGBdNjY2AjtBdvqtt94KJpgMksHfffrpp/jJT34CACE4CNnC3d3d0FbK1PNvjst5m/Op7NCk3FY+cJxcuXIlyDqyZRzHvV4vlHeRJm+EavDtXF9ZWQnl5VWtWixrz/mytraWCrCjbIfVmFdVb299s8ESVB7r/o/l5/1WxqllwTwDipWFht3nGqvWRmR1uU/SwAXW6sNjMmbZM5BpUpNEL7CEF0iIc0lN+ni1puu6B7dWTO12O5VyRoOiTNuXXpAMPktdZzTgG/cLykQBYysIZfCBiXxYXV115VuWaV8MNhERERERERERERERETEnFFY1ZWn59ITN746OjsIpkac5niTX19eDJs0m3lqU3SyRleROT8e8ajI9dWwFkiG3GQxAAxdYu0s9uWcxU9PUw9OKe9Cw0+psDkyS0D169CjUgdplhki/cuVK6FfV0lvM0pdq+04tlg0BqyFm1fnfYxB49T7j1QYQOD09DX1tNWqNRqM025GHs7TobA+bSHhpaSn4dVDDTo2aJtTTBKxZ/TLL2PMSAWoCa96nWi8bZILastPT0/As1lt9hjgudY55miSgvEzR9soKZ67zVzW0XrJdIKnxsj5Pyj5pHa2PlDr8z+JPZLWJnrMvy3pwcBBkHpklav1u374d5DiZKTI7nU4n+OiwHltbW2FNYL8rczwPxidLBlrfSVpEbG9vh/6k7xAZ+5OTk7mvTTr2OObYT5zTFy5cCPKYrMzNmzfDOGEbM2HtT3/6U7z77rsAJrKd2uhWqxXYd8qQzc3NFPtBzCvlA5BmcYfDYbCS4HikDNvY2Ah1t3sIDdRxnnuJRqMR+kTLwbHH9YTtfnJyEn5rfcdbrVaun0aVIfjznq/10HZn23P8kinY3d0N91kWQBmp82AO82CDjxEnJych7YgN/X18fOwGeaoSytRYNp/Xo6OjVFC0/f39RKoevR4dHYU9jmWCdN+h1j3z8Lf2AiGpVYDdp2qKFDLW6g/F9cTGLNBQ6irzdK4BycBwMSFvRERERERERERERETEHDAX4+d+vx80mQRPtI1GI2hciFkiv5X9jfcu+5kmbVStOk/FXlI9G56adWy3226oSutjUYV2pmhbqI+CZWF4cn/11VeDtvL69esAJskpl5aWUhFetNxVaJjy/EDy3qX29p7Nr7XDZT00ea0XttuyLbMyA2VhmQRlFPi3jRLF74H85KdVaQSt7wvbX8P9sxz9fj9ol3ilZmw0GoX+t0mGl5eXU6HrNZmyZVrKygdlobwkzFpPfX6e/5H63njsk2U7l5eX3Wh9fNYs485qrzVakbV573Q6mX6hd+7cCZrAjz/+GMDEf++FF14IsoNtocnWWXe1Y/cY7XnD9lOj0Uil8lgklJXj+KemlGP96tWroZ2pUe31eoFtIhPFsMwffvhhiBDH+cgxdfHixYRWFkiGOLcyT30zqoZldIbDYRh7ZNmIdrsd2HeWkXXKSiNSReoU+6w8eOtJu90O+wJqxblP0gh3lrVaXV0N9VNNeZnyVAUv1Q3HKseXhpq2jCrrBKST3D+vUJbUhggnhsNhSmZ4e8xZoFYl1tJEU8JY1ml/fz/1mfrD2si+hK5pHhtp6ztNkmVdU63lh9aN1ipkqXd3dwPLZhOIr6+vp/bjlHMXL14MY1GtezivLJtYVN5VepDSxs6bHLMuUlVlkc8L18zPNLw2BQUbl0JiZWUlCEguSJqdXBcHIJmDaJGOlnYRGY1Gqc2NmhBx8eaAY5scHx+n6Neq6Xlvs2j73Qt7raYG1olyMBikDiSE0uWaLyhvYz4PkwQ1kTvrPoLlpZDRXE1ad6D6A68+yzrAqnOszXHV6/VSoc01rwPnk5UVepDSuWYPUtP2U575nsJuwtUEwI5JDYZhTfZWV1fDZ17KBGuy2Gq1KjnAe+ZANq+SHgBZNg2CQBlAx2sejO7fv5/KI9jpdMK7WF9Cn7UI2AM/x6GagWq43EVB8/HopgCYKLEuXrwYxgI3F/fv38f9+/cBTIKCMNjTzs5OGMecL/osm5fOM9H1ch1WWd8sk3TKDG769B6W267F9nmLhqeo1c0Z5xH7Qk20bPuelaNvkQcowDdRVIUi11sepPb29sJ3lNk6989DeZKFsgftLBP8ecI7SLHNdZ9qU8Ko+R7XZD08eXsDILlHVnNHmwpGDyFlzRqVSLABfjSAhh0rrVYrpWTVHK7cP3D90cOT5gllPayMU7lfRO5F076IiIiIiIiIiIiIiIiSmG9cUwdFNe6LKAfhnUY1USWvNhSzasF5OrbBANRETZMN5pkLzQNZGhTrWKlOpPzMJkY8OTlJOfSepY0pq61RbYGnFeE9NsGpmkVZbY06NNoEgVpfQulme606/LnirMATiuFwGPqFmnU1qbNOqfN0GOeYtmVURspL0svfKTtqNZhqSmLNZzudjstEAeXr6zFShJof2fmif1sTuVarlTLV0/QInrmfhjvXZ04T5CQvCE3eGFtaWkqwU1p+DcRizYN3dnaCNlQd5/lbll/NvRe5Jtjw/GQ8lBWz5i6LKB/bRceLBlQBkikrWO7Hjx8HRorBQMio
tVqtVAJfNbNipqltngAAIABJREFUv6r5kq3zeQUFsJpylYvWCX7R7EwReOyUstRAso2LmBBn/b9oeOwp5YGOTxuES+vtpX45b5x3u+ZBGRvreqIy2Aag0CBbZYIkNZvNVKoRtfix1hJ5SaKzoH2ftd5q4BYNY26ti3RttQFbdD31LNDYjrZ9iu4fIiMVERERERERERERERFREqUZqSqdNs8DeQ6A1kYTSNqUUmPEEzC1hLVaLZHEFZicgJeWllIBJdRHymrdqm7Xs56XpelS9sbajar9aJ4GZxbtjvokWd8T9UGx2pHj4+PQL1Zjrg691rHXS6Doaf+1n+Y9B4q2n9Vkerbnnk9eldo3bQv7/uFwmKsFIqhlXl1dTYXXV/8dath59cIDaz3L+HacxdAA43FhnXCVpbLhtdvtdso3SgNM2GAT7XY7FYZVmalZEqQWSV+h2kpbd2WoWF4vxD7HG1mTXq+XYreyWMRFwSZ57Ha7mXI4a65XWXbV8PJvTTTLspJtok/kzs5OcLy2CVBXV1fdlBz6bCAp2583psCbv7MEqPKeUzWK+tAoG2B9qfPWmvPoEy2Hssqcx5z/msaCc53jUZnE5zX8eRmU3V/NAl3D7V7Mm78Kb+8EJP3k7b0q49WP11pXeGxVWej4Z5uq75V9rq6DtoxqlaSplfhsa6Xj7cd1bBYZn1OvyPNwuj+vBdWaMOghgousbvqscGg0GsGpzTqyqSmjbiLtYD8PYeK1t+YQyHM2zot+WAW8BcYTBta0Tzem1rGy1+ulIsNo2W1EPHXstw6Wusl8XmCdxL3v5v1uhT0EAUnnUmuWo8JS84gBvnDXzaYVwmUFoQdr8qAbfy/6nY4bvXp5ofRA5eWMss/QsVllRLmzzP6yovwtLS2l5gbL6ilhhsNhYuHS300T7WlaeGMh78Bw1hyv0rTMixxlo3KdnJyEwyk3rCcnJ6ngFF4wE29T4UWPfV4OUPPEImS3135VBezIK/8i+k2VmxpASNHr9VLKElUC/SIcpM4jYuJZh1AvGqldTwk1q7YKa1Xo6CFF/waSB6oqDlJ5CgfdB1jFtyrqbHAwJUms6aPuC7NMIM9CNO2LiIiIiIiIiIiIiIgoiUqCTZQ9lZ+nJl81q1khD1VTRy2Lhs7WULHA+FRvndpU82KZHc98Yl5Qk6oy7V7WnKLqPvX6ycvTYx37+/1+psmR53TpsWzKfFmtuxdk4HnGeYRpte9ULZin7VYtE5BkOwida5aFVPNZa0LoObHmQRkXGzzCBirRsutY9JxxrfmemiN4oc7zxt28xl5Zsz97n5bVy49n+2ER+YmeF/PbMs/S4Cx2HTo5OQnsFNus1Wq56St4taagasZy3gElFo3z2Hvo3Ml6f14QrrJ9sog6qpxUdwAguc+xZmPKhj5vFh3PO3SO2r2Y7l3sekpZAKQDIqncLZr/0AuOxPeUNTvPs2jyxpiu/XZtVIsva3mkclRzRfI6qznzF2M3GBERERERERERERER8Rxh4eHPn0dY3xINAataFrUdBZK+AVYjqNpFz19g0RrAotqfvPJM6w81Sx09XyQvDDX7ot/vJ3w19HqWU2He8z0/rbKhPheNRbNRRfrZ0/h4if2yQqFqu3s21bP6hynraf3i8px4lSnzbLctE5XnD+Vp22ZJxDmtP+tZ/lNZ71F7d9VAlw3kUAXKBEc6bw25xxTxMxuOHUgG7bBsqI6fLN/J58UfKo+N+UVDVazTecObK3acjUajlIyeh2/9Lxu8IEBeAC7P6sMGaPAYKbXy8dYyj/W2909TJ/u3Xq3Pk1qNWSsJ3XN7jJQ3TouWLQuRkYqIiIiIiIiIiIiIiCiJX2pGKiu8NwA3ups9zfMkqwkUPS22tUP3NIHnpZ0p896yGtt51cnTbqkGnO1s7YDVNy0vIqEiK1ndIiONlcWsDOCiymHb1OtP+78X3cdjTvSzaRgptfX2/IOsX5AyUjZRYafTSWnvKC/UDt1jGDz2bZY+KsIsecjznyryLC131ET7UFnk+XACySTJOgYtc0no+lVFWPN5sXZlx8Z5jqUvAru5KOT5UUY/qGqh7L7nz8TvCC/CKmWHZ4lj1z5lt9Rawq5Xs6TmKDo+PDY9ay3z5Ke3984rR9lx+0t7kNJJ7pk+eLRh1iBR53Av79C02ZLnjbKLVlXPK4OzHHQt8kJhNxoN1wle/9fPvHfoIe55WyRmbf9FhwTOel+eMM5q97zNYZl20XdnmfRpyF89/HhZ4AHfHEIXNi+wRJZJzLyCugDVmf2VeWfZ906LeWy8q+6LvMXeU+DoNassnknl8xi8oKp3PS8yuQpzoS8airoFlMEvYjtNC12PuM/0lCNWQdloNAoF2fLMBG0wr2azmVqv9Fqlu4M3nlgPNQf2rlkKcm/dKhLw4iw8nyr1iIiIiIiIiIiIiIiI5xi154EZiYiIiIiIiIiIiIiI+CIhMlIRERERERERERERERElEQ9SERERERERERERERERJREPUhERERERERERERERESURD1IRERERERERERERERElEQ9SERERERERERERERERJREPUhERERERERERERERESURD1IREREREREREREREREl0cj78g//8A9HQH529FqtFjIbdzodAMDy8nL4m5mF9/f3AQCff/457t27BwDY29sDMMmofPHiRVy/fh0AcPXqVQDA6upqeNfJyQkA4PDwEABwenoaMh3/0R/9UaEUxKzTWfAyGttsyfq/zaCsYLbnIpnoFb//+79faZ3KZh6392uf55U/7z3f//73C9Xp+9///pl1OqsNi+RIm+UZ/O0f/MEflKpT0X6YNqv7tLnh9H1l6/RFQJGxN0t9qhhvZTDLXCpajmnqpP/b9eIsFK2TyrysZ3tZ7BuNRliveCX6/T5OT08BIFwHg0H4nnKcv+P/Z2GWucRy88o1RNHr9UJ5uR7y/mazieXlZQBAq9VK/G4wGIT77Zqmz/D+L1onb20quw7N+ruimKVOFovMz5lX97J1Klpu7512rOq+kfOI420wGITvOKabzSba7Xb4G5jMsdFoFJ7xu7/7u2fWSevD39mrvmdlZQUAsL6+Hv5mPTi39vb2cHBwAGCyF2Ud6vV6qszeXPVQ5bjzUMVYLDu/qtw/6H4zb23Jk2He7/PgtVne2hQZqYiIiIiIiIiIiIiIiJLIZaS8U5n3GU/e1NS1Wq3wd6/XS1yPjo4Co3R0dARgwmQtLS0FjQQ/a7VaQYvhnUCr1PycpQ3jiZdX1XDYcnjsUxFGSrU4VSLrmUVO6JZt81iT50UDB1THDFRdpyKa+ixtShmNkPeeIhiNRpUyJl4ZtN5Wk5QHzp0s7dQ8yp2HvH4rM/4WXYcy7zirHnnaviwLhqyyVFl3750qg7k2UYNM6HrCNYfrVtYctJ8tQg5S462adc4hllfvZRn5Oy0zn6FtNq9xeF5MVNV1KlLWIhYeVeGsuTXtMzx4LGXWu4bDYWoe6fjkeOQ8VEsmzlGWq9frhWdNC7veDAaDUHZ+VqvVQnl45dpzenoa9qyWadP7eJ3
nXPplhbdm2P2D7i08Jr9In5Td1+YepGzhvP+XlpZSE6LZbIbPSIuSCj04OAhmfsfHxwAQDk+tViuYIXBCNZvNlLDn/2eZ1BVFEcE4HA6DEMijitWkAki2j15tR+kzzgPehsAeHPUeS1sX3XRVUTYPVZpVzctES02Bsg7l2u56eNC/y7y/7EavysVeN2e2vsPhMDV/8jYEOnfsglVWSM6KLFNnT6Bb2PKpUmXag3NRTKOAsJ/pwpRluqJ9q+YWdlGroo55Zc46SNHEjeuOymCWt9vtJq6NRiNhZmSfO+8DlM4DeyDSTSbXW6Jer4eycXOq/WYPXkB2n8+CaQ5DVRyg7N9V1KWIAuAsxcq8x8s8nu/V26s/5363203NI5ar2WyGPd76+joAYG1tLeyb+AzuG/VZs5Rbnz0cDsMz9ZCmBzsAib0s9672WaPRKMyrsqZ9EUnkrZseGWH3D965QE0w7TOrQOzpiIiIiIiIiIiIiIiIkijESOVpdjxTCXVq5UmflOje3l441Vumo9PpBEc/agOWlpaCli1Liz8tspxps5wkLT2tjrqWleP/rVYrtAfbqVarpZ4/L3PFs7R6WZolra9qkYCkVlRP/vPWgnmY1owqzzRrmnIUgfY/YVmZXq/njm+rkVGt9CLM/srAM6Hg32rqYeeRZ7phAwM0m80wDlXDbrWA8zZRtJ/pvLVOr3nOrx5T42kyz4PR1c9s2ZQZVPMbXq05tq4T8+ojb07b9m40GoGJ4hrD/tLgRTboRLPZTJkb5c29WedWnim2ZaRqtVpKg886NRqNFCOlJo1WI6/yfJFMlH2Xtw5WaVEwCzzLjKJWLXkmr4s0jy+DPCZK9zJ27pycnKSYKM69tbU1XLx4EQCwubkZvuMzbFCHo6OjqRipPHPjfr8f+o/P1r2cDdLS7XZD+flcllcZepUTnrx/HjAvq5tZkDU3vH2qWgjYc4TuN6x1D5A2cdYxbJH3nSIyUhERERERERERERERESUxtY8UsbS0FDRcGrqS2kme9MlCPXv2LGgbeNLXEJRra2uJZ2kgB88naVp4WhY9CStDAIy1LNbeV0+7lpGi/e/KykpKiwEgpbFVZmqRWgur/dZ6UxvEuqkvG+uiWqJ52aFnIUubl+WLodoF71rEmXwWqJ0ux4l1mNRQxrwqS2Xr1mg0UmzBvPynzoJtU88WndeTk5MwvnhVrbj1NeR86nQ6iUA0evXqVLb/8hg771k6f2wfeePTmyN8hueLOG+t4FkaccuAsq2XlpZC/3Kc0udV/XTUSsGOeX1vFb6uhGoj7ZxotVrB6mF1dRUAElpw1okWFBybnU4nyL+8fqpCdp/lI+Cl0+A6xT7g/61WK8FOAUnfMNbPe3eV8tyDZymgfWffX8Q/NE+zXBU8TXme32aer6i37nts1XnAk6Uew8KxpuwRMJYDrAPn3IULFwCM09tsb28DmMj2brebsFwCgN3d3fDsMu3hzUf7mVpAqHUE+06DoPEe6zelbDzvY5/a9ApVYtr9URFLnLz1p2pZkLdWevt8fqb7AGWpeI+1drHP1t/puC7i1+whMlIRERERERERERERERElUdhHytr/q++Ctb1uNBpB80wNw7NnzwCMNQzUmlG7qdFbqCWk1uzk5CTFRKlWp0o7bj0JWy3LyclJgiEAkNBcqO0vMLH7XVlZcUN5Wg2sp4mrok4e8iJvaXRF9iGj6lAbs7y8HE76lp3Le19VyNOiqw+B9UnScKx2LNVq5SPjle0ntq1GtWSbqraebcp5cnBwEOaRtTnvdrupcK2z+E9NO/Y8GaFltPLg4OAgMNOqwQTGWiSrPadGc21tzR1rli2oIpJf1jjIalcrR3ROW020h0VGeSqimVTmlLKaY6vX66VkO/+v1Wqhv5SZtzbt6oNZpczT8WfZo1arFdYYyjWOu3q9HsapTdExGo1CnQiVGXlsZJWo1+spTffS0lJ4v51L7XY7yDrL7ur9lhnnc+eBPNbGhssGfLmWJWs8VL3+qF+MfQevXnRFLaf1GR0MBpn7nEVaqeRZZXjse6/XS6xTABIsJ/dDV65cAQC8/PLLAMaMFNc+/v7p06d4/PgxAODhw4cAJvMQmMj4svXJsrzQNtfogNafi/NlMBiE+lAeeoyU7oez9hTT9Geer5pXP/3fW5Psd/aZyn577E2V8PYe2k8sr5ZHLaSApFxTyxcgaQHmWSzN2j+F8kh5phc6aFghXpeWlkLBORFI0e7t7QUhz2fowsa/KTyPj49TZnZKh89iumPr6Zm1caE5Pj5OhYplxy0vL2NrawsAcOnSJQCTg1Sn0wnP16AZnvmT/W5e8IQ9Bx6F4e7ubugfUvCsU7PZTAUM0UPtPA9OWZ+rCRLHIevGsXhychKENttdTZbsBMvrh2n6iGOpXq+HscT2Y9uurq4G8weW8fDwMMwfXtlPvV4vNT80uEnZUKxV9J2XZ41jn22wv7+fkAn63WAwCOXm5nVjYwPAuN2tuVm9Xg/v8hb9aeAJV30vF0s9tHsbJyAZDtwG1uj3+wvbOHnP9Ewd1JTXHqAoF58+fYpHjx4BmJhts322trbCeKZcbDQaiXkIVBc0KMtsRR3H2V+dTiccoFhGziUNbMQxSXmxtLQU7ldkmUbPiiKm9GrSwrbkestru90OcsEqbwBkmltqGbwxMg3y1gd7gO33+2F9tcFB1CRx1jJOM8/UdJX9b5VZ7XY79Z2aYduNrQbf8YLw2PunLXsW8syWPdNL7S/OH84V3r+1tYWXXnoJAPClL30JAHDjxg0AY7nOOfb06VMAwN27d3Hv3j0AE5nCZy0vL09tKpc17rIU5mx3vo+Hp9FoFNYha/an6XEoM3UdqwJaj7zARN7e0iqQdf9hD1J8pprM61jwgmbxvbOMSXuI0blhn7u8vBzaln2iZxKruLUHKlunWfspmvZFRERERERERERERESUROkjvj0JN5vNRJAJIKkxotkHtQ6a0Iynep4oNzc3U+YTXiLcqkK0ekwUMD698iTrOU5aM75Lly7h+vXr4W9gYjZSq9VcZ3prwuCFn52lTh68QAUEtUmk1nd2dgIdzzrxenp6GvpV+8ZqjKo0u/SgmhnVBLIc1ByS+Xj69Gn4zI49peEJL1ztLGBbqbkC5wM/u3LlSmh3hoe9cuVKGEM7OzsAJqYPT548Cc+wY0nZYmVQ5uU87jECgK/5Ozg4SGkk2Qa9Xi/Vh4THgLfb7cwEfFVo0C2r0Wq1UlrylZWVlAZdHZItG8P5dnx8nDLbVO10FQ7oZzFRrKs1p1StOvvmwYMHAIB79+6FfqPm8tq1awCAl156CVevXgWQtCxgX2o9WZYqzMg88xWbDH15eTmx3uj9AFLO7uwnTQxP6Fyad1AQQvtJAxyxDiwvy9/pdEJ7836O09FoFPpOtcFsxypN+zyGl+j1eikmrV6vB+uUy5cvA5iMM8/UmWg0GjMFpSgCjmMNhGVZ8k6nE2QD66EywqYC0D2BDczT7XZTbNU8g1PkMTjWsmBvby+MOdadFhWvvvoqvvzlLwMAbt26BWBiYf
D06VN8/vnnAID33nsPAPDRRx+FtZrtwrm6vLwc2rFsXfICJlgro+Pj49SYUvZJLUe0zt6eTseHV66y/cVyq2uA3Y+PRqOUnFKrFRsQSNkeNX9mnazrQaPRSJnaEmp5URae2aKa+9o5oaHyWUZ1QbEWcbzqfsJzcZnWxC8yUhERERERERERERERESVR2EeKsKEGG41Gyo7+9PQ0aCyUDQDGmgye5nmlHf3GxkbQhqrjedWsDetkHT41kZw9uavNNrUk1LrevHkzMFJkEXhqPzk5SZU/6/nA/Jz5+Gwg2XfUkD958gQAgo3ycDgMmqWbN28m6nb//v1QfmoIqnZMLqIJ8ELvNxqNMPaoPf/ss88AjMce+4514VV9CTSARpV26Cyrhnnl/CAT+Pjx4/AZtXgvvvhiGF/U+tPW/P79+6HP6LPCZ2tQE/WbWlRiVGUGrI+Qhnm3/ogabML6dWiyVy+Yw6z95WljLYOrIbSV3aC2kpoxDc9qE5NrOgiyu/zs6OgotInHxlcxJj3neMuoARNm49NPPwUA3LlzB8CYGWUbUD587WtfC/9zvLFuOzs7oX6sm767St88byyw75SR4lX9bWwADX63traW0raqJtMLiT4L8nykbHCnZrMZ3svyKiNl21v71zJS+m4buGWauuX5XvH5agHCz7a3t8O4osyjVvnJkyeBmaes5lhUPyT7Pvv3tOA40CA6Viapry73ORsbGykZoUlfbXADPvv09NRlc6tkrQndf9h+Oj09Df5QnMvHx8dhPJI5fOONNwAAX//61/Hqq68m6sk16sMPP8S//du/AQB+9rOfARjvEfksrsu0+llfX0/49pWtE5BOxAqkU+ocHh6m/Ki1X7gnYv9pKgH2DeeU+rZVYSGh813Xc62bloPQxMjsPw0MYgM5KMNjQ8F7FmiEWo8VhcdEWXkzGAyCDGOfLC0thXOD9TPn51pPykMNGqeMvrVcKItCBykvko86JFuH1V6vFyYaD1BcmLrdbhiMXMh0cOoBhPdnCatpNu9aJ0uXq+CyQqrT6YQO4qaWQuLWrVvhUMVBqLmA1JwJGAuhLNMEXSinrZsHNYPT57NM3IzTZOzGjRuhfrx6po/8rN1uL8zMRelsGxHx2bNn4eD00Ucfhc+AsXCm8+vrr78OYCIQNaCDdcwE8qMZFQUXe6X1OTZ4kLp//z7u3r0LAOH65ptvBhMJbi7YJ9euXQuHKt7P+u/s7KQOvLoR9w5UVfSdF/DBC9hghZeOSz3sZ5XRO+wsAt5c6nQ6YcNEga4RnmxUNY5Jz6RUlTzzNt/x5DjH/bNnz/Dxxx8DAG7fvg1gonDZ2toKY/I3fuM3AEw2UI1GI5jtcFw/ePAg5YyuwYmqMNG2/+u6xX5aWVkJc579o/PeLryaR9BuOLTcVcq8vI2/OkZrREW+X3NiAeP1iO3OZ+nhSU12eM+8gx3ZqI26BnLuvPLKK3jzzTcT5eWYev/994OMYx9qbjm2j26Sq4zqp5stazKl8pbvZBuvrq6GPQQPChrIifLDRiFrNpvhGbqv0AiaWpZplC3eOLZ54g4PD1PKkFarFfY+X/nKVwAAv/IrvwJgvMayDtxf/OQnPwEA/N3f/V34mzKi3W6HZ1lFp0b9LAtbt7yD1NHRUZg7Vkm8srKSUPoDSbM/G0hIowJmBSIqA0+RZw9U6laj8s8G0aEiYn9/P9zHMUb5OBqNUpESm81m6t1Ev9+fqY9s26ic5RjXvuHcf/HFF8N9wNgFhe2hwdP4P9uHbaIyIy/3aB6iaV9ERERERERERERERERJTB1sQrVhlkE5OTlJBZngyXA0muTjoLaBp/yVlZVEFmkg6WhpTStUM1wUnrmR1YwpPckT+cbGRmCiXnvtNQCTkJ4vvvhiOMVbDdXBwUFoA2oBnj17lgrfWoXGIg+ek3K32w1UO013eEq/ceMGvvGNbwCYMHAffvghgHGf0sxCT+7zNkm0/9fr9aD9YXvfv38/lJNjkFqu3/iN38Cv/dqvAZhoAqlh393ddU2PskJ9TlNX1TB6miRgrHUlK0gzqvfeew8///nPAQDf+ta3AIzNJoDx2CO7RtMK1vfu3btBi6uMsGVblV2ZhdmxbaLPtVrW1dXVTKdeNe2zJnRq4qFtWJWZoqd9suYKKneUVbNloKzpdDrhO81nASTNw7xnTashy4JlorQNWU9q7z7++OPARPEzBpz59V//dfz2b/82AOCtt95KlPX27dvhd7weHByk+rTqACh5rJ3mUKLmn+NI+0bznAHpfD9APuM6L3ipHtTUh21p5eHBwUHKVEllGvtfx1nV7KetgzXBrNVqQbvMtebWrVthX0Cm/Z//+Z8BjNkM1oVrlO4lOI5tvauC5hCy4cCVeeYYUgd3jn0yb5TZGmSIFjqaCsauQyo37J5JmZCiUHM+NV0EJiaVBwcH4R2U1deuXQvz/5vf/CaAibVHu90OffdP//RPAID/9b/+FwDg3//93wMTxXl49epVvPDCC+FvYMI4Li0tzdyP1opA2RPWS/tN03GwPjZQjQad8HKDVWVyDkzmuwbesMEgNOgX+08DnpGR4r7v2bNnobwc1xyvzWYz1FOtWKwJuFqiVQEvbQDBOb2/vx8+55ihlc6NGzfCZ2oODIzXMWvhprk4pzVnjoxURERERERERERERERESRTykQLSvlGqDbM2yYeHh4GFoSaTJ+JGoxHYG8tIaRhjz1/JBpmYRguoIc+tA7yeRnnqpkbk6tWr4cRLJsoGYQAmDByvDx48CEEPqIE5PDxMaQdn0agXDXmuDonA+FSv/jTAREP2zW9+M9g8U1PBU/3jx4+D1oIakbO0ytNqZDxnRKLf74dysGwPHz4M76LPxn//7/8dAPDbv/3bQVPBepOx+fjjjwMjpY7pVfpAqG8ItTnUUPKqvmZkCe/fv4/3338fwMTG/Fd/9VcBAL/2a7+Gr371qwAmfUeN4NbWVtBu0kb9yZMnKcdu1aTNmvQQQGqceRnpG41GGDuc/8rS8nma9BoYawCtNk5tnL1M7GWg2ng7ZjVYjLW9fvToUehDygwNe2ztyTn/1d9QQ7R6SXpnhfrXWA336elpqAvH3ccffxzK9vLLLwMAfvM3fxMA8L3vfS8El2BZ33nnHQDAv/7rvwYHcg16wPZRZpZlmKV+WWH3R6NRqr469+xYPz09DX2giXj5rCxn6KLlKoOs32o5dC2264iyMqyLBjSyv8vzbaiKmcpiutrtdpgrZDzb7Tbu378PAPi///f/AgD+8i//EgDwwQcfBJnHtZiyHpj4+XKO9vt9l0UEZrMs8JhjQh3kua7s7++HOrG9uRfa3t4OQTV4pTy/ePFigp1iua01wyx10n0R917W72swGIQ5zDK+9dZbgYliX7Bcd+7cwf/+3/8bAPDXf/3XACas4pMnT0KdyMTdvHkz+PvaJKuauLws7Frg+elq4BPKLPYb9xhLS0spP1gNLKTBknjNk99l+0kT5aplBz/TeuhnXrAjTT3C/tZxzTryO7WcUms0rUetVivNSmkQjiy52ul0Eiwwy
8+60gKJwbleeumlMKbIcHMP/ujRIzdJL8udl6A8D5GRioiIiIiIiIiIiIiIKInC6mfLSNmoWsCERdrf3w/aTZ7qqU1YW1sLWnIyOTzVa+hqjU5jGalZooxpklCr9dVQj9R2U0N28+bNcOJllBCWv9FohBM+T77U6t69ezewAfRT8cI7q2Zr2sgnedAIU2zPnZ2dYCvL76jZ+/a3vx1O9fTV+eSTTwCMtRhsM9WElk1iloe8sLmaqM2Ol5WVlcAU0h/qt37rtwCM+41RyP7f//t/ACa22xqc0omIAAAgAElEQVTOmeNzVp8hC9UI27DtfPf6+npgaDgvPvjggzCe/vEf/xEAgs/Uj3/8Y/z6r/86AODtt98GMLEV3tjYwCuvvAJgorlaW1sLfe7Z8M+SWsBq/DRxn9p3s2w2YhKvqnVX7Tmf5UX0y0qoVxY6xlRWAMnQ7dbOWkPd2qh0Kk80uhjrYMuqtvVZ1zLw/CpYVvb9/v5+YGep0QcQIqeRifqd3/kdAOMIXdToMYzxD37wAwBjZorykJrl1dXVVIQ1omrGg9D1wWNvCI3MZ/uV407lsudPNG+clYrEW4+B8ZpsQ2drlC67Dnlh0Kssvy03MG5Pzg9+9/DhwxB1lazGBx98AGAsFynbv/vd7wKYMCSffPJJKp3CaDRKhWquInqayjUvSbj1gx0Oh6EPNC0AMLaMoIy32vRr164FdopMyMrKSqqvZ9lDqL8428/6ca+uroZ2prXK17/+9bAvIphg9wc/+EFgEf/1X/8VwMRyZHNzM6zT3HO8/PLLKSZK/dbVT3Ea2PWp0Wik2klZafYR/x8Oh6F/uTYrI+WxktYndpYw6Gp1oRZhwGQ9GQwGqYS5KqdsXADdX1s2expGbZb9kt1TakRcGymw3+8HCy8yUozIfPPmzVR0RY7b+/fvh7OJMpx2rS8bs6D0QcrmsKjX66lQznt7e2EQUpixQOvr68HRX0Nb8h5LK6uZCzFLgANvYFizoHa7HTqCHXD9+vXgAMnJQ+zt7aVCbtPJ+pNPPgnCkguaLnz23brZmQVeyE/WmX2ys7MT+ox1U4dRCgEepDQghXVyzCtz1ZsNO+iBiTnVjRs3wuaPBwt+98477+Bv/uZvAAD/5//8HwATp8utra0wLpUuzws/XbZenmmC3XRfvnw5jD0NHsHFiQcobnYfPXoUNhg0rfrOd74DYGx2QVNGtsG1a9dCn3FRUxOGaZ2UFd6hxgt17o193m8d0/Uw4x3Aqg4T7uWZ41w5PDwMiysPDHt7e4ncHFr2RqOROCgDkw3RxsZGKu9Uu93ONPmdZgHW39oDGeuheZ5YjldffTW1YaX5zrNnz4IygnPqxz/+MYCxfLHKsosXLybyagHJg/O8DiRWAahjy8sfaDdruqjbcaqosvy6rtnxrEGSCM1ZZHM6al4Z1pMbJT3EZ5mozQN2LOuBgPuGBw8e4D//8z8BTGQd16j/8l/+C/7n//yfACbBd7g5evLkSbif61yn05naZCev/KrU0WAmQDLMPuf65uZmULSyjCz38fFxyiRRcwyy7rxeuHAhbBLZ97MErPKCe/E56t5As3GmPrh27VoYV1yHfvjDHwIA/uqv/iooWejqQHnw+uuvhyAVPIhtbm4mTI1ZHpZv1mATdrx5ORVVOcuxSBl/enqa6F9g0jZqvs0y67pURRh0lQF5ira8lAkaQh1IHgCtDFH5oPWwYd55T7/fn/qwq8pTQvfjdi49efIk7F9oLsu90c2bNwMBwqsGd+E8s2HQAT8QTpG+iqZ9ERERERERERERERERJVGIkfJC9OpJXpOZAeOTPE/xNtvz1tZW6pSotCRPiZrYLouRsn+XhTUb4nVtbS2wE+rwSRaG5dGAEmSiGBSALM6jR49SmeVbrVaKklUtSZVaQW0faguoeT48PAwaFmqaqCVaXV0NLBvrRGatVquF36nT+ry0ylkJN4F0MIIXXnghaO2o2fv7v/97AGOzOJrGkYliP7/wwgvBpILPHI1GKWo+q0xl6qGUuWVIlcamtmt7ezs44ZK+JkP10UcfpUxHaYL5/vvvB6dsBgtYX18Pz7eh9w8ODhLambLwnHmBZDZ07S8vaAR/b4POUB5o5nmVEV5SSq3jLPBYAbYTx9jOzk4q+TjnPTDR9lHmUTN76dKlMAbJRK6traVMAL0ksNPUw0uyCYzbkFo/zoOvf/3r+Pa3vx3KCSRDUVPz/NOf/jRR38uXLwcmn89iHwN+H1UpO1R+WkZvNBql0l1ocnRrXqKmWtY8jM/L+78q5GmjNYw5x43KMBvOmletzyxWHkXK7UH7xJbt2bNnYX7bhK+/8zu/E9I/EFx/f/7znwcTVbaJJnO1pq2z9NdwOHSTIwPJMPsaXIv7Ca4/1I4rI2yTj2r7aFh76xpBud5sNkv3obIMbCPr3vDKK68Es3G+e29vL1jfMCjI3/7t3wIAfvSjH4V9IGUcmayvfe1rCSaK9bRmhWpqW6avPBM6z7TP7r/UtJFl12SuvJ9X9vHq6mrC7Iyoch1SCwm1btLvdG1SxsiWl2sN66x10vFkZZ6+m1CXEZvS5Cx4Fj6275rNZig3LR22trZC/3CecG167733gsyw80BdJzjf8srlMWUeIiMVERERERERERERERFREoUYKQ1NaH17VMPHk+He3l7C0RNAIgmv1aTwWb1eL2hcqBXo9XopB2vVeE+rPVtaWko55VGLt7m5mXL4bzab4STOMlID/emnnwatDJkoapoODw9DGVXzbu1RtSxVagQ9rZ86kfLkTodP+tTs7e0Fm1MyHPy9hhCed9jcPCdHdZbWxHh0QmSfkL159913U0l6qWG7fv16GKOqtZ9HckovkaPnA6TJaDkOqdGko+4HH3wQbNNpf0+GSu3u6dty69atoB20jtKDwWDqsaeabI+RsukENjc3g40/rxpK3ya2pvZJ0y14gSGq8GVjfQjLwmt9WOaVlZWU7PLC4No5eHJykrLVVvaD7WA16WWgbIb1l2G51tfXg1zmWHnttddCOSgLfvSjHwEYs7tkqllPzqmXX345l91V36hZ66Tw/EItu9/v91PhwMnK9Xq90Ncabp//W38KZdLmHXRC+9D6KGgwBdWQs07sY/UFA8Zrj7JaQJLxtLKgqHb2LHjPtXXq9/tB5lFmkFW/detWqAO10AxmcOfOnbAf0eAmNjDDLPJBg1NxLHt+X9ZvamNjI2jUOT9oafDkyZNg8UFZp76WbBfusVRGWoZ1eXk5lW6hTJ049ikPWEZdI1nWO3fuhIBNf/d3fwdgwlAfHx8HZppMlPpFsX818I36xgPpBOZl4Y1Zr490btv9LBmpvb29xD4DSMp/rqOez43nw1t2LmmACLsGaih8tdQgWDautWzXTqeTCqyjgTTsONI9uvWR0tD508Cz/ACSPqBkL7e3txPWIMBkP37nzp0w7uzv6vV6IvAW65Tn21Vk7EVGKiIiIiIiIiIiIiIioiQK+0h5kbWA8amRmhOeEA8ODhIJeIFk4jlqZXia16h/XrIsLYe+exbtmPp9qS01
rzzJ8h0nJycpvwgyAHfv3g2sDW2fqeUcjUbhWaq9sEkpZ4nmoshqG9VGs01XVlaC1okh3fm7u3fvhrCSZHE0UW2VUZA8FLGt13FJ7cXe3l5gYXglQ9NoNEI4cIaWJQOnmhlN0DxLOPC88mdpXzQqDjVQKysrKS0h59PVq1eDrbmNrnhwcBDGKJ+1t7cX6k5mSqNmTpuQl7/XqzJTXrQg60PAcjQajdAGNtm3hnPmGDw9Pc2UDWU1mTqebVtoYmEb5azdbgcNF+Wb+t5YWcN719fXUz5iykpXMb804pJN9aDl4FzguOh2u4HN5ZUJoT/99NPQ/hxP1Fxfvnw5yDdlWi0jVTWyxh8waYNutxvkN+WhMlLqJ6tXT+Z5jFRVzFTW+FV2QtOCcJ4oiw2MWQH1/+BnwHi8WU2sx+JVhayx7DHKzWYzWIVwLHFeHR8fB4aUMo9X9fWjFloTq2eFoy4Dldn2uWqhYaOSesy8MlQaARSY+HAcHR2lIukpG8a9kvZb2XVLrXLYbmTN1IKB1h7c7/z7v/87/uVf/gXAJGof3/3SSy+FCLpkpLjPWF9fTyTBBcbzMI+JmsU3VH+vbWh9C09OTkK5KL/JSO3u7oZ5xXv4++Xl5fAMzi+NoFeFfLBRRoHJnNZxZ60OdA9qw8uvra2FcaThxvmdjbSqPmRWdngRRYtC28X6gC4tLaUsjy5duhT6x0bxffLkSUhzQ1nAtW04HKbqqeu6V64i4y531+RtWO3mYjAYJBwggfEAZCOwkBQY29vbYaJyYWKHHB8fp/ICacd4m6Wyk0sPLPybk8HLeK8mhvxbg0wA4/CLpBdtYIl2u504oAFJ6l2DNfBatRkZkBzkbMfNzc1gjsMBSqHx6NGj1CZcHZmtSd+splRloH1uhfFgMEgdxrloqVkpF2l1ELUhV8+qRxWBTryNkl1Mut1uqAvHEPvi0qVLoX5c+BiQ4vPPPw/9yTrt7OyENuO85XxcXV2d6SA1LeY9XqaByhar7FBTA/bH1tZWpqN0r9dzHWiBZJZ6Few2rcAsMk83qZorT69bW1vhQMf58/Dhw7BhojM/5ZwG5OEiRRnfbDZTZkeeiXZVsO2hh1z+rZtOblB1AweM20fNsID8vD16AFhU3VQhpmY0HJuU4yy/KjF0nQXG48xuWKsONlEWOj/susw+fPjwYegzBpbgdxsbG6k0FqqYqfLAqwdp22aDwcBNcZIVanplZSVx+AUm4/L4+DhlrjUcDlPuFkS/3y+dR4rlWVtbS7k1EI8ePQqhphma/p133gmKSr6Tir3XXnsNr732GoCJsoXycjgcJuoHJNMPZLVrFfBM+zSojN1T8GD79OnT0DZWPnc6nZRyXA8aVRyovCAWHA/aTja4jJr+cq/AMaMyxOZ/1HVIFQZWeaB1rEIO2rQjev7gfNnY2AhuDqwvzx+9Xi8c+Bkwjeh0OqmDo5o4W3PFooimfRERERERERERERERESVRiJFSut+e1LrdrqtB4X02bOGFCxfCZ4RmIqc2lJpM1aJ6Jhtl4WX/tqzQaDRKsQL9fj/l3MZT7+7ubrjfBpZYXl4OWhjW29M4VwHtG4/pIKh9aTabQevEUzpNEz///PPQFzbkp2rAFsEm5JmEWMd11ZJYM5fl5eXQL2x39psmo9W2ynr3NJoyj4XyHK9tXQaDQcLRFEgyChxLZNs0bC3HKpkp9imQDK8LjOccnzsNypgrnp6ehnnP8qvjrNWeqxOyZl7ne/KCTZSB169e8AxCQ8lbrZaaHGSFwVVmXN9jTQerMPWr1WqpVA86Hyi/KdceP34czGM57jiXNI0FZYgyH14fnVW2quA9i+U4OjoKGmY7toCJjGZ5VXZYLf8iTH8tBoNBSuOsASLYn2TSlpaWUoyOphhRhoPv9spzVrmK1CkLnjxUZswy6JoCgXXSwBIcj1yv1BqjCrNSjy1R81l+Z8f+YDBIzXWd89bEl7JFzZmVjbSMgD6z7HxSB3yWg+XmGvLo0aMQwIlm/0+fPg33k3ViIKRbt26l0h/oPPSsj7KYqFnN+jyoyZiynzYAFNfJp0+fhjWW7aWmqJZl1HJXwYTqXseaaHtWOjo+rDmbhri3ba4Mqg1/rsHl7N51Vllo20bnlF3/Op1OYAdtsAx1N2LSXg3t7gWzyGJwi467yEhFRERERERERERERESURGEfKS/IBDDWMFOzwGu/30+F/iQjtbGxEU7u6gAMJJ0NlZGyTNQs2lmPkbLPUXt+tZelbT2DL1CD2+/3U5oN1SpRG6NMUBYTpYxEFfDCbWrwC5bb+hXt7e2F39o6TcOizRJWOw9W06Nad+vz1mw2E6FWgSR74LEFs5bfQx5z6L1HtUBWG31ycpKqJ68616hZ10AwVmM6SzJehWVflFHTIDTWf4VaZg3taxPHHh4eJvwW+ft5+Kpk+d6wjPqZatA9LaTViKtWbFE+hR4Lz3crO0E5t7+/H74nw+EFyeDz1SfM1rNI+WYF+0I1jYQynLzP+rTVarXAAthgHO12OxVyuGpnckVem1h2V8cl11v1b6Hc1qBOQFL7royUree86lEkoJCWTcvK39pATq1WK4xtnXN2PFYZxMV7ns55ZQ9sUArPn8/zzbTJXjVksyf7ytZP1xA+l77gvN67dy8EMqI8Xl1dDcwAGSkNZsRy24TCKm+0b/KYqKr9pTS4kjJSdi7rnoh7P0L9eCwjNRgMcvd5s/i6WkYq6z5erT9dXjqNs/b9dt229aoKHiOl9VD/ZC2jBndi37HftC2UPZ41UXfpgxShGx0b+x9Ix29XB8Ys51edXCrwqjZv4dVueDwzFI10ZA8b6rSmm3Wtf6fTSeWK8gb/LAOw6GJlJ1Oz2Qx1JX2tm1R70DwrsmCVk6hIH3sLmW6e7HhRE7M8U7B5Cm8LbwHOe2feIcWOQXU054aw1Wq5wRD4zGn70Gs/FVRqhsTP7AFO50dW3qXj42M36709QFW9sdVnZZn/5ckpu9lRcx+7IFV9uFI57inCAD/YSr1eDxtymzdLzVc8M75FHqBse3vmLrrW2A0S66uLs5037XY7dVDTvxcVMEXN1OycAib9QtOjWq2WMtfVOVXGnLkqFD1AZd2vmyGr3KvX6ylZ4CnJznpnWeQpxLwNoZUlOje9/YFdu1VhaPtwGjnOdbPf76eCLNA86smTJ2Gua64hOv3zqoFO1HQeSM45K/fy1t15BZ2w5toabMIqiA4ODsKG3NuM2/W31+ulZJOn5CkKL6BXXj/n7Y+KHKSy2jzLDLgqGei5pVgFhI5/G/1b83/aPcbh4WFKsaTRN6cdZ9G0LyIiIiIiIiIiIiIioiQKB5uwmhTPaVxNDXhKVOdkYHJ65G+BiVnZ8fFxQjtu310lM6Xv8MxQbPje09PTFFum4U1teEZloSyd6pUh6/8q4ZXDtrfWMSt32HnjrDbK0pSoprKISV1Rs8Iq4Wkyz2Icrdmf5pbwTEioobbMsMeOTAMv2ATLRAyHw1ReGy2r/hZIsmeWScsL5lBFPTwtstdOeQ7AeSZglunIM/2cBnlO8eqYzM9
US2uDU3DMKDNS1qxyXnLE02Jbx2tlYawZuTr8q0UBMK6/ff48zTLz2khzcwHjcqvTOzBhpDRXDp+p+Wg82bgINmAaeHsCQudo3lxbVN2yNP2eKTqQrxXPYqgsu6EsQ9lxqWOC+zEyUhqUyOb8u3DhQtjb6ZjjsywT5QX9yNvPzZsttCG/vdxqLKsGqrF7Kc3lprIyL4hLFdYfZZ9RZvyfx/4n6/l23fJYeELnhmUOu91ueJZnYTAtIiMVERERERERERERERFREoUZKcJqilXrwO80FCS1FeqjYROeqY+Vl5CtaiaKsEyUalizQhkDvqOodeJWP5Widt9VomjgBMtIeSf+IozUPG3QZ/W3KOsvU9Y/a16wTIh+5pXDsjLKGnj2954D6jS2215ZFDp3VH6oL44to+cLxqsXVnheTv/es1R7rOUqIqds+TyWdJ5jK8830MowZTRtfdXhucp5NQ2ynquf63plgzUoE0cGinVSJ/ws/xb9bN4YjUYpRkr7gv2llh82vYcm+iY8hnXe/ZUn1xRl1pai2vrzYNm8+nrlzavTLAxCHrgXU0ZKg7AAYxaK80ODzlgfPPW1tP6TXl3mxURlIc93qNVqZQYGOj09DUE2NHAYkExUq3I0b/09TzwvLHNRWIZJ103LKgLZaQ7U30r7Imu/XFSuPx+9GhERERERERERERER8QVCYUbKaobVptbalLZarVQYVtrU1uv1lEaMjFS3203ZK3qMVBU4S5Pv2f1naZw1Ypdqc3nPtFqzKpGlHc/SgnvlPg+U9Yea5hlFnlNFPxXVLFrkab7PYjutRizPFl/n2izIY1uV9c3yffR+q3PUi3KX9e55QOdN2b7x7l2Un+RZ8z0v8bnH+FUxr+zzq0CeT5iGZrdsfL1eD5pm4qxIq1nlrqo+RXyltBw2mt1oNEpYTgB+wk6dX3nMW5WYReY+D2vTLMjyofK+m3bdKAONrmc19hqpk3/zmpdSRP0ui+4vFsFEedY5yiJx7lA+KJOrSaEBpMLSA+lky/adEeXgyST9jNA9uJV16j84jyTqUx+kOMh0YWJhO51OcDzkVR3CbEhjnYDWOU/DglZp2udtZPI2CbXaJLylRb1eT02evA3WWYvsLItwWTOILOfXvN9N+76qUcXhZ5EOlZ5DdNFyZN2XtyB793kCaBEO11nhyYu+f1EHjyrLpSjSb/NGXt2yNjZZB69FKTjKoMihQ+tpzY2WlpYS5iFAUjGWZ45VBYqak3gbAasw0aAgWTkgPZNbrzxfNHxRD2BllWRVQnN0cbzY/IStVit8phtVPTjxM16zyjvvQ1RZJYBuwi0xoHW1e1dV9KlCJutZEbOhyPjPSyVQ1hxbzz657zzzjoiIiIiIiIiIiIiIiIgEauehGY2IiIiIiIiIiIiIiPgiIzJSERERERERERERERERJREPUhERERERERERERERESURD1IRERERERERERERERElEQ9SERERERERERERERERJREPUhERERERERERERERESURD1IRERERERERERERERElEQ9SERERERERERERERERJREPUhERERERERERERERESXRyPvy+9//fsjWW6vVCj+0aJLfMs88C3/wB39Q6GF/+Id/WHkG4nnVd5Y6sUx65fuXlpbCtV6vJ343GAwAAP1+P/w9HA4BINxbr9fDM/jM0WiUeqeH73//+4Xq9Md//McjAGi1Wmi1WgCARqORKP9gMMDJyQkA4ODgAADw7Nkz7O3tJT7jPQDQbrcBAGtrawCA9fV1AMDW1lb4rNPphHqyLv1+HwDQ7XbDtdfrAQB+7/d+r1CddD7lwY6TWq0WPrPfLS0thc/YX7yenp7i6OgIAHB4eBiubA/2K9t1ZWUFW1tbAIA/+7M/O7f5NC8UmU9V1ofjtF6vh7nDzziuBoNBap4VlSdF5cPv/u7vjrLexXE9GAzC33pVeaDX4XAYnsErMRwOU/JhaWkp0R72yjH453/+56Xnkp0bKpNYNjs3AKDZbCauKgttvfV3WifvnQT/Lirzqhh7RcdOEeStV9OsTXlly3tXlb+rok5/9Ed/NMp6Ft+p80PHnn6vz6jVaol12V6z5L/37mnqNO3Yq2K8zWNfVHStfR5QVD5Qjne73bCGcz+i8on7pdXVVQDjvc2FCxcAIFw3NzcBjPc6bH8+a39/HwCwu7uLnZ0dAON9FTDeU52enibeSbnZbDbD/upP//RPC9XpT/7kT0bAeA9i93eE7vO4j9nb20vt81iu4XAYysTyrKysAACWl5fD/o7fNRqNcP+08iEyUhERERERERERERERESWRy0ippjFL6+dBv8vTWNjvqmSopkFZDZbeU0Qzw3vOo57KGBHKLFkN8mAwyNQ4q/ZsWk1iUdRqtYTGQ8tar9eDVoTajFqtFspLxkgZKcvCUDuhGh0+n/coVJNfZT96z9K5ZtlEvZ9lt/U9PDwM2iVqck5OTkIdqJGh5mp7extXrlyprE6/aJhWPuh9lpEajUah/zxWowqo1tIyLfodxw+v/X7fZal4v2XQ+L9q0D1WjvOK12azOXWd89habVuWezQapZgxZaTsXNL68vkqa2x/RmQjb3wvam2s4j15jKS+h2NJNew6DvV/vc9jbq3G3GrtbRkWNR7P451F8LyWaxZ4VgyWMdf9D1Gr1YKM45rPfY8yUoTKN0LfR9no7Sftu8+CZ7Fg37+0tBSeS9ZpaWkpNYf4Xb/fD/PFY37tOqRy3JarKHIPUkTegUqFiQdPwGRBv1vkYaNImc4qT5lNkJrZVQnvmWpewLLpQLKbCTVD4MDkZksHmzUJnJfgUvNDDnz+r4uV0sL2IMXy68ZQzdl4L8udd5Di7/UQOgvyDlB57a3lYf14WCLVvb+/n6K9a7UalpeXASCY8V29ehUA8MILL4S/F4kic+E8F8MyyiAFx6aamNlFQmXrvOqo41//BpA4PKnZKq/2QOGZ9nmw5ntqukFZQxOUWertHaT0YGfLvbS0FN5vTTxGo1GqXThvtNx2o6soYt48D/yibBY92H7V9fM8lZNAeiyoosua9tXr9dQc0/WWY4918dZpziFVCOStIYvEeSvCszDPci1y3ul4yjtI2X2Lmk5T1vG6vLycUoTpXsqaROseyirS1BWiKPIOODq3+C7K4EajkVpD7dwCJnJc91Rcd1SBNqsciaZ9ERERERERERERERERJZHLSFFzrdpKpaKB5AlYNSRZDM00Zn/Pg6ajKItUlJmalybNC1TA91lNQr1eD9/zlE4Mh8MQqMA6pgNpdkjNYqo2UbJaP9Ugs2wsv2qJLeXb7XZTjBTN2vr9fooG9jTO1GL0+/2ZGKmsfjrLjM/S2ScnJwkGCpg4hh4eHoZ5y7qsra3h8uXLAIDr168DAG7cuAFgzEiRpZoXtC5ZbaCw88Rz5n9eYDV7XoADj12dl0kfwfGfxTrxHv5N01CPwcpjpHQMW/O9VqsV5o6dZ0A245oFb74Qykiz7mzbTqcTmCiy0ZQdJycn4X6yu6x/s9lMaEN5tfPxvBipKjHv9bboeM+yZl
Ezdc8kJ09m2GdNU1cd95aJ8jTmOkY4vjjHuMaenJyk2F/+r2yurt3evmvaOkVMUMZCYpFtrYGCPFNrIGnyRigLr0wUMJaHajII+LJYTZ6zTPsajUZp0z5Czes89pWgPFaWlt+ra4Nl7PTZbAOdq9Z0tmy/RkYqIiIiIiIiIiIiIiKiJHIZKWrrR6NROAkeHx8nrnpCVX8bq6lRZJ3mszRU8w5KMQ8W6TyYKc9fQO1YrSZBtRf2lF6r1UIf2zDiw+Ew5eegvnKedrashlbbxWr7tIyWkeJ3wESLwnKfnJykmDSGPO92uwnmgHXi/Vbz0+v1pmakPFZGr3kBJVgOajIPDg6wu7sLACEcKLXpvV4vtMvGxgaAMev08ssvAwBefPFFAMC1a9cAjEOien5hVcCrJ1GkHZXFsGPpedH+W988zp+jo6OEdhmYaARXVlZcNpWoQi5oqFxqxO31+Pg4wU7xarXjqhW1skt9Cy0jpfexnnzWNMEmvPniBZmx7d7pdELoX6Y+YDn29vYCm8s5pbJHHbT5bitTPT+eafC8+w3OC7o2Wf7pD8gAACAASURBVLnghRb3HMc9/8MqoX6yhMdI5VlJ2HDOBwcHiYBAem+v10utPzpnrD+ix9IuEjo3n3eG1luPyrSdZyExr7oqc5TFSHkBH+r1em6wCetjpOPIylTd7xMacKdKRkr3eewTLbf1N1YLJMvu8p5GoxHaQPeMWaHXiyIyUhERERERERERERERESWRq36mZnJtbS2lvdPEWPTNoCbl9PQ0xVjoiS+LiTpv/6lZWKQ8n7BFhkb3EuUSNuTxaDQKrCO1AGQuWq1W0KgzKZv6Wtjw4a1WK6WZLptgVKG/sVH0lAXzGDVrO2u1gPosMjuqfdf3WpZAGa1p/Tr0b3v1fGY0gqLOO2CsOacW3fq0LS8vY3t7G8CEfXrllVdw8+ZNAGN2Cpj0ubKQVcJjSllX/cyb/9YXwtN4FZ1j84SyrjaK4s7OThh7HD9kRYD0nFWtmFevsjJCGVlloOx3NsmjF7VPIxHa8qsm0Za73++n/DxmqZOGirY+nNYPBUgmp7x48SKAicygfNvd3cXjx48BTOYSGet2ux2ewd8NBgM3+hrrU7ZOefJB4TGyZbThi/SlKbqu5fnN8aqMqWVIvUiKKjOqlA86J+xao5pvro1kn9vtdigTn8FxpmyotSw4PT1N+YGo9t/6HjYajUoiypaFlV86N63/6HkizxrCCzXvMWw2NYyypLNY4uRB9wN5CcM93yXumTxGyvqNa5vYeiobRuStzWXgWTYAyQjJHP+dTiexD1RowmKNuspn2mit7XY75R+m87mInMw9SN2/fx/AeKN16dIlAAiLEDdo3W43LESPHj0CMF6QaA7GgmnhlbLTq62A/p+FeWygssyHskwMvVDYOqnKOJNPYxLiHSztJl9DWWpIbDXfBCYbjrW1tTAYHz58CCC5gNjw4Uq1ZgmbMvBC7lvKV82GdHxZ8zQ19WHZeA8VBGoGpIu6FapqslT2IKX18QQ4v/OcJ7mo0uTo6dOnAMaLrj38sA9feOEF3Lp1CwDwpS99CQBw69atEOKcm0S+b39/Pzy3auQpGPIUKXbslN00VoGi89eWVc1A2Uc2YIoGOSlan7L11kMT53TeQcoLzuKZ62aFAdcNlH5mN8ZeaPRpYA+wahpFuUBlweXLl8MaxjbgnPrss8/Cmsfy8N6trS1cuHABwGQ87O/vp0wfrblVGeSZ9+rcsHJVx5A96Op6YuVoloJjUVATPFunWq2WUtZR7ne73bCGWdmhZjpq1pPnNlC27nadAJBSQmjeMW74VldXg2y2ZTw6OgrjkPspT8ZbxUYWpl2bZoEdXxpW2oahPm/FF+DnLlIzOC8Nig3M4IUFV/PnWZTJFl74c3v1UqbU6/XUQUqvWSH8R6N0Hj41MyVUQTZLPb3xAySV43z38vJyQomvUMWzRavVCooNDabHZ9h2LCobomlfRERERERERERERERESeSqAWmy9/Tp08BK0ByIDuvXrl0Lzuq8//79+/j0008BAE+ePAEwCcncaDQSLAbgO5V5WKQWw2pORqNRSjurWjCe8C09OhgMUmYL82TZsk71LB8w0Z4dHByEPqDGi/dtbm6Gen7++ecAxhpbYNzPrB81bCsrK4kEZ0DSzGXaYBOeCZEXYlbNJyzjyXKohp1lpGmFOtt74aqtdmoaRkq1HFkaj+FwmDL72N/fd5koIBlAg6HLGc781VdfxRtvvAEAgZm6cuVKQhOjz3z48GHo61mQFwzC+7sMc6mmwXpdhEY97x21Wi0R6ARIBjOwjAvnmZpyzqsOGjxCGSggadpqA1Co9jEr1DnroJ9paGZ16LXm3vq/1SqeBTUnUeYWmLACtVotsK5XrlwJV7Y917S7d+8CAG7fvh3mFdc0Wl5cu3YtsFpkQ5RptEnLtV+LQtvPS/BKqDxjG2QlerXPBZKJKK3D9iKZKY9lU0d2tY4AJm3R7XaDCSaZetZXTX5Yz16vV6lJmTrnW6aWY+Pw8DAVNKJWqyXYKWBi4nvhwoXAeFKO08LnyZMnQf7z+d1uNyU/bRCA88LzwDZ5yDMv51VlkXVL0d95ibut+ZkyN1UwU7pOWvaLV11ndM9iZa8GXNB9rNbTY6SUebP7q1ldOIgsecW6A+P9pu75tO4aWM0GEDs8PAznD7XAsMHWsvYsWYiMVEREREREREREREREREnkMlLUqO7u7gbtHW3I79y5A2Cs9f7qV78KYMJS3bx5MzBRt2/fBgB89NFHAIDHjx8H7QpPxzwhLi8vuxqyLIfVWU/ARbRveg9PuTYceLvdDlokai21HlarW7VzvOdPYjUtnU4nkUwWGDMR1FrQLpvP2NraCnVnn7MPHz16FNqA183NzVRgAGWmytbXYym8xLzWjl41kpYx0gSd/I4azePj4/CdahmtFstLpFoU6kxpf6taZnVABsb9RNaIrC81z51OJ2jNOf/IQr3xxhuBiaKPowYR4TPZv5999lmY50WR16+qhfNsrW17e+wH+9xjNlSznqfBXwSWlpaCVosygG3e6/XCPGHbq6O4p/GsEuo7pP5PQLI/rBbPm7c6hlluT9tpEz8uLy8nQr7rd51OJ8GYF4HKB+sbpVpL9gEtKTY3N4N1xCeffAIA+M///E8AwMcffxzKxDnFwCw3btwIz9WE1zbAC+vh+UGeBZVvNk2DPov9xHr3er1QDptIeDgchmd5juZEEauQWdYsT5bqWOKzlRXlOOGVa+xoNAoyiww667u6uhruV9+NPK35tPXyxp5aYVC+egnSCSZHX1tbC8wb9116JTvFdXp/fz/1Ti3XvGRJXlvZtA9Aes32ynUe/nlEHiNiy55ngaL+UOpnU9YaKQ/6fPsuj/HyGCnrI9VqtVL7Nq9OHiNlAzPMwvp6MQi0/LZsKysrgdW1zJS2u03I3mq1EkmwAT+9gLJbMweboDnE2tpaoNIpwN5///1wfffddwEA3/rWtwAAb7/9Nl555RUA44MWgGDq97Of/QwffPABgLTZ38nJS
RCCStdlHaBmnYB5z7Ebsnq9norGde/ePQDjjmBbceHlQqwOl54DcJVOiHpAsxtQYDLg+O6Dg4NQJ9aFG/V2ux3MWxjxjY7Xn3zySSKPETA2EeLAts6a05he6UbJtpHnhOgdpGw51AzIOtsfHx8nJpaWQd+pG5uyDvLaHtZpVaPyWTO+p0+fpkxY2NZXrlwJgSS+/OUvA5gcpG7evJlykD84OAiHJZpqcm4+fPgwvHsa2HGuJic2D93h4WHCDEa/6/f7oa0oB9TMR81J9R7Fog5Uam7KcnHucxPf6XTCfOGGSE2TZjGNKAI9oGblH8kKWGMXMF3c8iJB2U3w8vKye4Di78ua9qkTux4agMm839raCnKZfVGr1cIa9tOf/hTAeE0CxusQZd7rr78OYDKXtre3w7xRBQfHLt/Juk0TOU03cNasxXMAZ31PTk7CGkqZwXINh8OUORkP+howRA9SWfmb7N+zQscZ/9aoknZe8zC8vLwcZNaHH34IYLIOra2tBXM5jUirpvazguXS9YTtops0vuvBgwcAxvKVex6OId5/48aNIKt5gFJlhJ0zOzs7qWfMW45kPddGcuZh7/DwMKWYsME27HOrktd5beCNZ1Uo2cOxKux0HgLJYBN5h5oq93mqGPAiBXrKX7snUhcFa6qn7/NMCO2BadYDFK9ZQaSWlpZSZod6kLKuQjq2rHLWO0hpFFDvMFxkTEbTvoiIiIiIiIiIiIiIiJLIVanzZHfhwoWgJSG9To3Q7du38c///M8AgB//+McAgH/6p3/Cd7/7XQDAf/tv/w0A8OabbwIYh2Gmud8777wTngGMtWnUrGkOBmuqVXUITY+ZsqFL1VyFmiBqwW7fvh0clqlVJztw8eLFFFXsOQLPajYB+CYMqoFgH7Jtu91uYCBYfppK9Pv9oMUlI0Vt7Ycffhi0T3qlJtAG3PCybZ+FvBCinlO7jhdqwbw8A1b7ooyU57Bt36ka+WnzSGn4Ug38AYzNaKlVZt8cHx+H39qAEq+99hq+8pWvAJhoz1966SUAY80z247PunfvXuhrZaKA8bjOChtaBtYcQE2PVJtP7ax1pNY8ZZr/BxhrNvl8zVfihW4FFmc2Uq/Xg2aMLAiZwu3t7SAXaBLN+j148CDUu4i2fJpwzXlmsjpHPfbD3q/mfFmmYuoIrCyUNcHQ8LU2UMdZ0NDydk5zzFy6dCn0BWXB48ePgynfT37yEwATNn57ezswUd/4xjcATCwqTk9Pw9pEGbmzs5My6VOz02lNf/VvK3eUoWe9Dw8PAwtA9oNzSkPAcw5pzhQ17wOSTJrVutdqtUq0z7ZOyu6rvLKmlFy/rly5EmQX9xBkeJ88eRLWLY43L6DLLOutmm/avtDgI5R1/O7+/fthrHEMacoYmmbT3I/l39jYSJmWa1AEvkdDpC8qX5MG4eKYo7XS559/HvqCa5QXZGdR8HIiWQbCY1y8/Ya95uVHtX9XUX47N73yahlteZVl85goPtvLV6XyYFYoC5UVkENTIXCdX15eTgWj4f+dTiczl6gXcEkZKd7nBZ3IQ2SkIiIiIiIiIiIiIiIiSiKXkdJgCtQkkJ2g1uTatWt47733AExszf/mb/4msFQ//OEPAQDf+973AIwZqrfeegvAxJ+IGox3330XH3/8MYCJlmUwGATNiw2XXnX2bk+LzZPwxsZGOPHySg3MRx99FDTNhNrK834vgAZRlTOitZ3lczudTmCMlDmiTxQdr9n+Ozs7oa+ZwJX/b29vBw2o2rTreAGq8ZFSDYjHTNkwy+12O2VPrsyUp6EAkglJ1Yk7iw3TZH5FoVp0vovtr35R/JvlaDQawX7eBpR46623ghadjCFZAA0TTA3u3bt3w9/UhrIMs4TPVRtnL2GoDVF9cHAQ5jjHkmpwbWAUTQDN+aT2z/bdqlVflObT+puwHzY2NsIYtKywMptFQuROUxcviIHnd2jZbPWX0eS5LL8XUIJX+1mn00lllNfwwmX9DW3YX30u5dv29nboAzKit2/fDkwULSH4jFdeeQVvv/02AODrX/86AIR59+GHHwafQvpY7e3tBW2oXaMajUZpViDPX0DnEse9+h1yDnMuke3tdruhTPydJinmc7V/rTa3Sv8ihefDwXGgdeLaRNnxyiuvBIuPf/mXf0ncs7e3F37HcaC+tFWEofbmrjKGwLjdbRL1k5OTwBhyDJGhunfvXpDp9C+nT9j6+noqgbz2hZ2jx8fHCw2FzrJwjrFOd+7cCWOUVhJVj6Ei8OYQkFyHNVgL+836nmn4esoVZUMs4151egGPvbHwZLYXGMgLYuOt317i3yzZNE399Bl23+BZUOh+1usDYNw31hqJ88Hzget2u6lASyonYvjziIiIiIiIiIiIiIiIOSBXDehFe7K272+88UYiASgA/Md//EeIiPQXf/EXAIB/+Id/AAD85m/+Jv7H//gfAID/+l//KwAEhur69euJaIDAWHND219eeWrU0MFVQk/8vDYajcDMkEljGxwfH+Pv//7vAUx8huh3sr29nWJN8jBLaHTVINhEgc1mM0RrYvS9jY2N4C/D8lJL++mnnwYNGbVgZDyuXr0atE4a/c6Gsy6aaDmrLnyW1RJ40fT4LtV8W0ZK/Rb+P3tv8iTHdV0Pn+oau3puoAGQIAECpCRS1kiF5JB2jrA2Dof9tzjCK0dIIm2v7K1XXnrv8Mp2KOxwWDbDmi1ZJiWSskgRIAliavRY3TV1fYv6zquTN29mZ9YAUPq9s8nuGrLyze/dc++5NomhynFrQtIsJaRp5I2VvfOYKGDMDrH+2L8uXrwYZMwZa0if8+effz4VB8I2uXv3bmhfMqYffPBBYKKsxU3VceYBzwqnVmgrycq5ZTgcplgdtfZZ67X6eds2mdYCeN4YtAxyv98PY9+ygJq4U63TwHSpAcqCdamyyHbcqEXQi12xEtqNRiMREwUkGSk7BrXdPPn6adnd0WiU8JsHJrFAq6ur4XOc337xi1/g7bffBjCJyeO89vLLL+PLX/4ygIkFnSzprVu3gkIcWQWVvKc11JPgLVumpaWlRDJPvsbPePLnNr5T/f/5HLTcqsqhZVGbzWaKXfES584Ca9FeWloKfYj1uLS0FNg1qvxyPH36058OazD3HGxTZRRYB/NOeq1y/1ksq8bPsi09dUXOxXfv3g1xU2SrGGP59NNPp+KPa7VaqDOPSWbZHweyUoRoHFee/PkioYyCZVc01Yiuw5y/rSri2dlZKsUFGeuLFy+GeYf7JbYPkGaApqmHLFU7hcYLe+rGNjZWkSd1nsfQzOIZdh77pZ/RMjUajTDf27QByg5qUm7eU/d8wHiOzErBUnRtzj2FaAdUtyRgMhm32+0wyCl/fuPGjZBbitQ7D1Z///d/j+9///sAEAQpvv71rwMAvvCFL4TNO4MU33333UDbc2Llb+dRnFko6kKn0rLAeIJm2bnIstyj0ShMqDyIsAE1kM2jU/OesSw08NN2FmAysFWemR3OulG88847+MxnPgNgPJEDkwPY5cuXwwSiHdTmqCkrxqDQQZSX
Z8huoHVxs+5FjUYjTPJW7tLbjGgguxeAWraduHE7PDwMEzcXVk7WwGRCoGvHzZs3gysLrzxYbW9vh7Jz4udC/N5774UDFA++u7u7KYOE0v5lZagJrQvrGqRB7VyA+HtaXo610WiUOkhpThW7UDUajdQCsegF2y7OvV4vlZ+Ldb65uRkWYI4RtrtudBd1oFI3B7tIqWuFrUPN2eVJndtxpocnm69EjV42cHiWTUWlUgm/YV08lpaWEu7XwNhAR7c39kW68X31q18Nhgo+I9353nzzzeD2zI369vZ26IvWxW8wGEx96FD3J+tSrBsBPYhYAQ+ODX3Pk0C2h9p6vZ46SBFnZ2czzem2f+v9+ftsk7W1tVR6BoYOfP7znw/1znmQxtzbt2+n3Ii99AizuNKr8Iq6GgFJo4I9ZKnR17pgHh0dhb7K13jIunHjRigf191ms5lyy+TvPA7jDFGpVMLv8yDxwgsvABiPDz4v31t0zrwy8NzRe71eyu2eV3U5Zx9TUTJvLM1TBCnvMKO/4+W/yjpAaToFe3BR4Y28NWqWcAevTN6Byhp1arVaGF+c73UutpLomr/NnmWycijy+aJrX0RERERERERERERExAKQy0jpqdmTXAXGFlVaWWlNunDhAr72ta8BmCQ2ZNLeH//4x4GG/4d/+AcAwE9+8hMAY7c/fo9WjevXr4f70sJLN43Dw8OprX7nudBZ16/9/f1g6efJl4Ibn/70p4M1mhYYsg+tVitFyc47AN5rJ57A2TbD4TCczvmMly9fDiwTn5e0tkq62wSBFy9eDN8jk1KpVFJs2CyiIB4b6iUTVdYDGFsqsqyzmqzXWlhUEEGZKf5tgxenaUOyEHt7e8HKResX77uxsREYQE20y3Gk0uYsB9uMFlyyUO+9915wF7EuZQBctmEW1z5PIp6vW5n61dXVFGuqVicrbqDufLYt1DL8uCydnuwx25dty367urqacKflM9t7LQrqApQV7O25hDQajRSz5DFS1jVTXXqU/chiDafxLCC0P6jADzBuE1r+OSbu3r0b3udY+spXvgJgzEyxfcgGcN166623AlPAMm1sbARLu012rq4qRaF14LkxA751ttVqhefmfM8ynp6ehvrhZ3jVoGxlCbPcY6dh4T1YFkAFpVS6nusO1ya6+7/11lvBc4X1zznz8PAwPL+uR/MUplL5eMtCqIsin5+eK2tra4G5pOeHullzX8P5kHusg4ODwHLTDXVrayvV55ShXETIg7fPACZzM72I2L+03u188Ligz2znH507lN3g87P/qGCBdSNW18V5uO8VhV1rdYzaedxjinQOydtf2X1SngBE0eS1HjzXPs/FUNd+O+9zbK2vr4c25L5H98GeqEaea19kpCIiIiIiIiIiIiIiIhaAQoyUZ31X5sMmo1tdXQ0nQgo0kL158cUXg5WPCXzJfPzzP/9zSJb48ssvAxhbCRkrwnvRArq7uxviQqZBnp+0FSXodDqBkSJoea7X68EaQ59tWkKHw6FrHZpnwlC1RtiAQRVQYL1p4lDWLS1eZCzu3LkTrLhsO5ZDLewq1W1/exapU7VWZsV9qRgEUa1WU5YKFUhRP3IgKVtrf0dlMfmejf8pA9bV8fFxSoqYVsurV68GJopW1+effz4lbc52ffDgQUq6XgVEOD7Up5v1Y+Ne5iXeYgUNqtVqKk5BxTo8piJrXHgyqZ5c9LQo+n1r1To9PQ1jnjEdZKja7XawmDPWgeOn1Wq51vJ5WjPZZ7RsXhtZiVwViPDEI7zEuvy+J7Nr51svkXhR6PPbZLgq7ME5m9b+4XAY5jwm3WWM1KVLl8KczpQejO394IMPwn01xpSsA3+bDLaK5BSF1ofGRCnUZ19ZEDIzrG8+V6/XC/2LFlt6JLTb7dR491ixWRKLensIT46af7NM29vbYb2nuAfZm7fffjshTMPPA+M24dyoa8g80osQmgDainXo2kN2jXuhjY2NUPe8cq64e/du6Ht8fpZXX6MHyM7OTooNtRLvi4QXE2sTpFar1USMOYBUfN/jhPWUyBJOsPG8rPPBYBDKbUVmND2OsrweYzQrPJbNS1/gxUXZsTcajdx9Fctr5wC9lyezPi3zq+u6t9+ze0pdr2ys1Pr6eip2VcV3vITjWYJmMUYqIiIiIiIiIiIiIiJiQShkfvZO0Wrh4YlNZaPp18yTIU/3V65cCewNLe70fX777beDFea1114DMLayU0mJkqe03m9ubs7F59azVlkLwnA4DFYVxmrx/5WVlXBitgpSqhbi1eM8mCnvuzxRq+S2jSHY2toKTAfjbDSJIK1lVjGoVqslrGxA0jddLYHTlk0tJx5TxP+tTHClUklIoQMTi3y73U5JxrIuVK7YU3Oxv1PUUqEgYzsajYL1hHXKZMfPP/98iA8ku3nx4sVg5aLVkhb2W7duBSaKMVLaltbPW5kEGx+hil1Fkde26jvtSbLa+AJtC2s5y2sTL/ZnWovntJZrVT+yqmGj0ShYNW2KAGUfsmIQvOcrA85FyvRZ66zGC2g8lBcbxf8ts6nt6CnyWSZKfeHL9ru8BLKqusW4Jr62traWiG0FJvE1Z2dnYSyRiaIKa6fTCfXIdAPb29thTlUZfJZpWkY+jwFSZkfjNaylnOuu1q2NQ2q1WinPC7W623l8mjmvSDlViZB9aHV1NTBSNnbi/fffD+uOncvW19fDGpbHPBDTpBvxWBUv/kbXHZaJz62y2cB47uacTiZb2W56jGj8F/s012Jlpp6EKp6XIsGOSZuW5XHAzq9ePJEyiWwj3c8CfhJknQO9dXURsu/63Fa5UWO1vN+28UdnZ2eJ/ZRelSXS386KdZ0mRkrXhKwYKV3z1bOGv8U5Q2OlOCY4Z3PvpXXn1Uuet0seSvvxnHeo4kPYTqiHDivvTFr+xo0bwS2Jm/jDw0O8/vrrACabR27sL168GBb2eSDvQKUbbW6KWLajo6NUgCtRrVYLNcQsBypdWLM2oCcnJ6EzqYsfNxVsC7pRdLvdUN/coBODwSBFp9py6zPMMmmq20+eGIS6qHiTI5+Vi5q6ZwDJ9tXfs5OMPte0bi6rq6thAeUBivlQbt68GV6j6wYwcRWleIS68/E1foaL1tnZWUqsQQU3rAz1vAKyvYOB3cB7hyud4OymO29cLMKV5bxDjS1Pq9UKC7DN66WGB816z/ssetPDxSTrQMurJyyRJTbhCUqc14/sQklMIzahbiWsP+vOfHh4GOY8PuPOzk5KMptlunfvXshZROMe3XEbjUYYs5wzV1dXw3PY9W4wGMw07xVavGXOsy5mnJe1Xq3rpn5fN7j2IOXJIZdF3oFlOByGeVbljW2KDV1v6bLJccU+1Ww23QPsPDfuugn13FQJe7hdXV1NiO0Akzn+woULYR6nIYwHKhVt0L5t9yusw1artRCxifNgx5+mRMnKx7goFDHw6aZaja+eexev9vk96W+9ztOlTw8u3sGdVzVIEt7ehv/btC9eaIZd7+xv8r2yewgdN1mufb1eL/QjFZuwQmNqOLe5pdSITXipHLyDVJE
5L7r2RURERERERERERERElEShhLxFTveKarWaOtnxRNjtdgM7ZanvS5cuBasu3S3u3bsXLO1kgmiNPzw8DFareUKtZ2pRsNYInpw7nY6b6EzvZ1/LwizS6J51m1aF09PThFQ5MLYk0CJGK7qKMNA9k+6Wak1XSzwwblfryucFT5eF5zKl0u58Ju+3LOWrrn3qcsm6IPT3PEqZv1fWOsu63tnZCawTreN0W71y5UrKLfTg4CC4k9oklffu3Uskd1Uoa6BuW1kJUadxc/HgWe3y3D2tm6U+h5UjVZcpL3HpIiyeeSy8yt+qWxIwma9Go1GCFQUmffNxuODwNz1BCQ2Sz2OkbD/S5Nae7G+WG5++VsSVMQteALVmqgeSUv9qoeTawnpRd2a68pEVIDY3N8McyTWqXq+nfpNjdhahnTx464MGtVs3R/28dQnUOvQSj3tsyzzGlyc6oomt+RmuLZw3lWmniI71elDXbs9FMm8sF0WRJKKenPPy8nIYM2TSOJ7a7XboV/TQoTvf4eFhar0CkqkXbDkep8y49X7heFpaWkq5xT0JkYksaP8vyhzZMTGLEEtZeK7yhDJTnvy6bSNln+zc5aWXsXOH/u0l/p0Gdo/A51DPIz6r7nXtfq/dbqdSD3CMKKtuPSn0GXRcFxlLkZGKiIiIiIiIiIiIiIgoiUKOtB674sGTZSSUoeLpkKdMWm7VAkqrzOrqavBJp4VGA7et5XBeyIuXsihqqZunBKt3X/WdtYxgv98PFj1lImgpZ32rdYIWAVr/WP+tVitYDVQq2bI2xCwsm1or1UIBJJNNKoNhLRSe76xNwOYFrWsQmPHTMAAAIABJREFUtPXhB8rH5ly7dg3AODaDyUDJTDH+otFoBIse6/ujjz4KTBSvZGmPjo5S9a7sU15iVGvRWZRFzWPvtF1Z757V1fNTz/Jhf5zwAv5pZWZbqu+5ldv3pGkXBRWZsUyUMk4e+2Rf8wQlvDJ4bWPjSNQCOa38Ob+v91NWyMZJbm1thTmA3+Ma8tFHH4W4UN5DRWHo/cD5ROcHGwtin3ER0PHrxTAU+V5ewk1iUWyv14aEWt1Z37puaeyvvZeVN14URqNRisnTPu3JSrMsVi59Y2Mj5UHB+eTg4CCsCeyXZ2dnKVbCem48LmR5D+h7Hycm6jwUSajryYh7wjDzRB4jpeyKnZe1n9r5qt/vpxh8rx1tLJb+rTFSs4hN2DrVZ+YzspxaBzZWqtFohHmbzBTHz2AwCHWQF9tVlmksHZGYFWxnYV/P07JnA/Z6vTAx6sLNhY+LOj9zcnLibr7miaKbzDJiEfNyofLum3WA1cBPVbzS3FDARE2o0+mEz6kKFu9lXbRqtVruhnZaYQYtgz1IdbvdlDubbrStsk673Q4DzC5yjUYjNTlpRm2bR6pSqZTe/N24cQPA+CBFcQ9Sz8T+/n5KWOLOnTtBBISHK6Wqvfw/LFOeEMDjOEDx6rl7Et7CZZ8pb3F+Eou09ntgXOfcJPE19pnT09PQXnmbjSK/Nw1082Y3cHrNe80umurG4blqWTcNzQdnF3UdZ2WhB3L7m5oTRvO+8Lm5uNIw9+DBg9T6w+9tbW0Ft2AVt/BUGInH4bZpf2vajUzW/0XfK4Ii8423hmiAOT9j3Tl101hkAzxL23jlsO3v9fder5fKx8ayqUsiD1CaJ4d9lf2z3++H3/IO0U9iTvTceD/O8NYl4MkY5orAO0hZo7EauAh1M7X7ZTWwe6p9tm/pvG9du2dx7fPWDF0nPNc+jh27F200GilXejWgcwypQM+sglvRtS8iIiIiIiIiIiIiIqIkchmpIsHA07j92dfUSm1zxOgJ21L99Xp94YyUPmtRsQh+/rz76eft69PAC8BTS52lddWyZ/Nf6b144lc3Cvv8S0tLibwds5ZFYRkpPn+3200ElNvPW0ngVquVEptQ+XQVO2A5rLuEumSVtVzRne/ixYspGXayfbu7u4GJopvRgwcPgnuldSGq1+sptyu2pZfHYpYA/1lg2SOPcSz6bB9XNxE75jxJWk+QZdHMhfYPy0ipq18e6+S5PnjumkCSsVarvLV4amqBad2RPJcQQt1YVSjHjn2OPTJTwMSSqZZN24b63Pa3p3FzycPjcFN7kshjoD2RjDyGe9Hw2tVzqc9Lp8F5nP1Sc/hZF+2VlZVUSo9ut+uyoFnPNy/Mi5X8uMArT5n9y+MUm1BWiHMR217nbs8rydv7AUlGygrlaHn09+z65sm9l4U3j3vjRueALDa9Wq26qW94LztWm82mK5hUBpGRioiIiIiIiIiIiIiIKInCMVKegIDFtGyMJ7+sPtA2UFstvzx5Pg6UiSkpWxfzgrWKq7XKSmB63yPb5wloKCNkM2t7Qhfzkjf24i2AZKI27UM23kLjWMgEeQGHGshIWMt6nkXkPFDGt1arBXaPVnDGRd2/fz/IzTMeyhOUUFbWEwLgMz5pJioLefPHbwKs9UwtXTZmR+ewJwGPGbMsvwbc5sUn2nEPpGMj1Cdf/dxtbJReyzJSniVY2XFebTlHo1EqyTbH4tnZWULOHkjK1du4HBUR8dayj5v1nfi4PpfCmwvKzmWLFnfyoGugJ4rD/uIxUnZs6u9ZSXdd57ISXX8ckdeuHwfkMUxPeg1VZpZ9SuPd+X8RRsrbQ1n589FolEqroIyXjc2eVf48S4xIvRl0PrdzrnoiWaExZaTs55XhKxKv7SEyUhERERERERERERERESVRWrWPyDuxTRs3lRUvYC2eeqJ8EtaMsszUk7S4eO2k9ZgV19FsNlNMoFoIPN/0aZWj8p77PPU+q9q3tLSUsKgASVlMWihofVELimWkstTH+N60SoTHx8cpifOHDx+GKxPs0nd5OBymLE9e3MuTUOabBb9pz+SNJY1TsLF5hCp4Wbnpx4Gicr62r/N1vUdeEuQ8xTJV5rO++LOo9insXOapmOqz2XjcpaWlRHwhkFRVs2XS5I52vvo4M1JPGnlz0rzja7MwL/U+T7GSsLFLmuDUY63zElzbuUVlqL1kzIuaXz7O68mi8aTLrGyMFw/K92yf9tI0eHsomzwZSPctIK1Ya+OwyyBvHHqpBDyPILs/HY1G4XOczzUFDsE6qFQqKUaqLKY+SCmKTIz2s3n3ybqXfe/jckh50gNMUUT61dswETpRZ90rL9fIec9QFHkUqyeLqZsou0ipRCgHlCebaw8kuoBZ1ybrflEEDGo/OjrC3t4egMlBiv8fHR2FiU0Pgjb/j5cXyk4CRQVSFIv+/G8y8uY3ddvJc0F60vVlN3J54+zs7Cyx2OhVN2v2np7YRNbhyr43bXkUeYcZNQLZ+c9zW1GjnXU58ea8eHhaHD4OBzD7G1m/6wliAOm1U8VK9EAPJPugd6+sTd8iD1LzRBwr5aDrvB6k9T11r9N5zh6k+BnvIKXzm9cXFyF/rs/rGfyt8Uv7js0n5bkk6oHKumhrWTz32iL9NLr2RURERERERERERERElETlN8FyERERERERERERERER8XFCZKQiIiIiIiIiIiIiIiJKIh6kIiIiIi
IiIiIiIiIiSiIepCIiIiIiIiIiIiIiIkoiHqQiIiIiIiIiIiIiIiJKIh6kIiIiIiIiIiIiIiIiSiIepCIiIiIiIiIiIiIiIkoiHqQiIiIiIiIiIiIiIiJKopb35quvvrqQJFNlc1cVySz8rW99q1Ca7FdeeSXzx4s+l32eRWXoLlqmv/3bvx3Z52B2al4Hg0HIDq1X+xo/z6zSQLpe9HeYQVpf49/2CgCvvPJKoTJp37MZr/UZ+Zx8rVKppLJZr66uAgDW1tawtrYGAGi324nn7/V62N/fBwDcv38fAHDv3j08ePAAAHB0dARgUk+1Wi3c9+/+7u9KlalsP9PM9oTWia0f+/3zXvNQtO8tao5YBIqU6U/+5E9GwDgD/MnJCYBJ2/N6eHiI4+NjAECn0wE/z77IPtVoNACMs6mvr68DQLiyH66uroZ+1Gq1wveYYd0bX8Q3vvGN/yfbCAD+9E//dASMxy3HPutseXkZALCyshLqm+O9Wq2GMcz21bbs9XoAkvMJv2fbZGlpKXNcTlOmb37zm2F+sHOd/l/ktaw5QctUqVRS/WtpaSlRvqzP/+Vf/uXM6y2hv8kxwzkcAPr9PgDg4OAAALC3t4fDw0MA43EHTNp+c3MTFy5cAABsbGwAmIwrYDJ/83u9Xi+M27Jr03nzuF3/tJz2efr9fign78vP1uv1UB/6/aw21v+LlqlIOyny1hFvL1Dke0B+Wfh3kfFUdq19kpil33lrvvY3vsa/7f6u3++HPuj1u3q9DmAyvvRzvAev+iyvvvpqqXmcz6Xg/bL2m3ae0jFi3/Pm7Ly+6NXxn/3Zn2V+ITJSERERERERERERERERJZHLSH1c4J1M53lf+3/WiT/LuqJWAO8zi7aK8CReq9VSFi9C2Rtavvr9frDA6mv289bioOVRS0LWb49Go6nbzrPOKgtlLRmVSiVYUWiFpsV/fX09/E0LC60xp6engZF6+PAhAODBgwd49OgRgLTls9lsBsarTFnOe82zEnt1p4yjVwf276L1/5tgwVskitaT7YvD4TD0JVvntVotNc6UAfYYhoh8cIyMRqNQp6xj1i0wYTg4VlutVsrKynYajUapOc+zttrv6d96r1nKZNc8/d9al/U5sth775lGo1F4397Te66s96eF1hnrlGtZtVpNeFMAk/Y9OTkJLCLfUw8D2+bNZjPVnoTW9aKgrCbLZ+txOByG8hGsk1qt5s7jj2uuPo99KsI6eXuDvL7kMatlyjuPuvm4roXenlLryfYVZdP5mrJQyooqtH34Pc6d3m97e5Hz4K2D9prFqnvrLJAcZ941j6Uiys55Ux+k8g4Y52GWxWYRsIvO2dlZYdcIIN8dwjtEzLvcXBxqtVriUKXPo7+rA4eTN6/q8mA3fdrpzztU6W/PegC2VLJuYj06mq49eoACxu5UdPPgPeiadXBwENz47t27B2B8kOKCbQdrs9kMi/csZSK8vsS2rFQqqU0F62IwGKQ2XUpxl1mAPy7j8eMGb/yWOUjV6/XUGNLv2U1evV6PbXEOdNFkW3Du4nU4HIY24OZ6eXk5jFvOBepGZhdx73Cm0DGqmMaAps9h4Rn3dKORtfnwDuqEZwQ7zyg4T+jveBseO+dxrj48PAxutrwH5/x6vR7ala+pWxIx7Qb9PHhGX29Daw90uhZbNBqNlLvW2dlZah81j3KUddnLc/P37pW3b9B9xSKNS3n1VMTYWRRF63IesHskhe4peBDifLi0tBS+w3lTjerW0KJ7TFsG3YuUxdnZWWJPo2XxQjg8eIcllpfvqZusGio890CWsUhbRde+iIiIiIiIiIiIiIiIkijNSOWd2BfNTM3Txc8L0lfLiLUS57k8qKXJWtayrDjztILRUqf3zDt1azlt8C2vp6enqdfUOuu5vtj2KeL2l4U8i6r+pmWKWq1WCOS3Af5kqICJcIAKTHz00UcAgLt374b3WD8rKyvh/vyfr5WFx+Yqo0krirYX64DWIrbJYDBItXVecHNeUGdEEmXmGbWa2e9rQK/nQusJBWS1yeNwFf5NAMfhcDjE6ekpgLQLWLfbTQlRtFqtFCOl1lnCzm/ahoRnrdR7lF2n1IvACxjPu2feWua5RPNaRJxCMU9Lus7ddg7T5+Rcxzn74OAgiE2wDVlPrVYrMFH6nh2bOuYWMZ5Go1HCig+MreGWHeNv93q90I8Jry9pf/DYh2lRRCAii30q4oJuXWQ9Jt6bC7Oe8TwUZZbyxC1sGTx4+5o8b6RZxo/XRvbZPBc5YDLHcW+hzJSuU4AvxEIsLS2lxhz7tM4nReGNd95X5yvPoyOrr3gMtzcGtS4sc6VzcZ6nABEZqYiIiIiIiIiIiIiIiJIozEgVOWkuShRiEb+jll17HQ6HqQBmz09TT69ZJ361tulvzzNOTAURyJJ48Up8JgbhLi0tZQb0np6eBgsZZYL5f7fbTTAigG+RV5T1cVbLal7gN+uWVpLV1dUge7u5uQlgwkjVarVQFlo0GQ/1wQcf4MMPPwSAECvV7XbDfa2U+sbGRrhv2TIBPhMFjC1ELJP2R9Y9Y7ZY/1p22/c0MNSzwC4iNuC3BecxC3kstq2LWq2WijdUf3A7r3jxMIueU3/ToLGOKkIATBj6TqcTxgnrUWMoyUzxWq/XM2WCNQbO6+uWDZ6mvdQqmuXhoIHUXrC0x0zlxVd68QhZLMK82BsvxtiWV9dgznmUPz84OAjzIdddXldWVsIczXbWWLmsgPZZYetd13iNz2Ab2zo+PT1NxePq/G3rR+ebeewliggUVavVQvFPXnoS3UcBSYZXmV4732kfL8IMZD2T/dv7X59dkcc65dVbHsuX9VoReCygPrd6OwDjumcZLBtfr9fD3sYywCcnJ4EFVrZI2V8g2b/Lgs8zGAzCGLbMmMY8aXltzL83pu3+qlarhbGke3YbO6b7dy/G0iIyUhERERERERERERERESWxEPnzPN/QPPWwshYV73fOg+e/au+h1kdavnq9XkINSqGneuuf3Ww2Uz6ZnrV7FmsSWZXl5eXASDFOSK82Ga0+N6FqVWqZACaW3pOTk/CaxlF51s1p4fn422utVgvPz3Jvbm4GJorMFD/T6/VSTNT7778PALh9+3aIkaJlUJlG1t3W1haApJR6WahFySaiVKlbWvE6nQ729vYAIGUharVawSJkLewaG6CKUFmM1DQWsjJMddbfZfp+lpVv0Qpj593TU07j573E18pQ5cWulH2O/1fAMa2xIpyrOcbVys/6rlQq4bt2vKytraXkqZW94fyn8YpZ/c6T1D0PykhZpkXVQq18e1bCVj6rZaTU28J6XvR6vRR7oH13HgyOZWrU6quMFNcWtifnQMa16r1UqZXzMtu52+26DDIwHctW9PM2flf3ApahPj4+DuVim6sqrI3dGAwGuZL1ZVFWhc/zGPH6l42v1hhRL/7GMrA6DmaNkcpbI7Stspgl/byNgT8vVsx7vlnUALPYOR2j3KPp2mT3Mzpe8lLCkA0G0glwdd4qWybu0bx+5M1JnONPT09dpWkg2bfseNe9Pa+1W
i2Ugf3Ti5/Kw8LzSNkNo9LDVjxA5awX/Txe0LYOAE961bpV8bmr1Wo4OOmiDIw3+JpZHcjP9zQN7t+/D2A8SNTFDZi4tW1sbISBxc67trYWDiBcdDjQWq1WKkcHr5rHg/c8OTlx5Yd5LTvZ50lfcgA3m83w/OrOx7+5uPJ7R0dH4QB1+/ZtAMB7770HAPjwww/DQk202+1Qf9vb24nrxsZGuH9RaN+zk5C6BnEw89D08OFD7O7uApjULdt5Y2MjlPfixYsAJm2omz8vyHcWNyQPWYYRXTSKBLp7LpDeBjXvNfv9ovAyp+fdU5/Zbr6JwWCQ6drniQEsyv1yFpQJ/vbab97g+NExaF3BDg8Pwyac40DFWfhdziEXLlwIRhhvjbIbFHU/yxKHKAPP/YRXPmuz2QxjX8UyOOY9kSPrAqOuMyouxKsaD+01Swb+PHiGyzxJZZ27uJmjC/vR0VFKKERdrvk379nr9VxXTV5nWYuz5i5vjlejF/sQ6/bo6CgloMG+qG75WXmoZoV3gCJ0DHh9SfuOvVohGO0/Vqyg2Wym6kxzgbGti0APGlkHIi/ViHeY00NWVpqbrDUh6wA/zZ7Im1ftPAEgVedHR0cpAyz3ctvb28E4bNO5qACKCr3YOtODWNmxxL2LV3/qBmoNPnmhJyqUlrXu6u+cnZ2lcmh563oeomtfRERERERERERERERESRRmpIq4oHluO5aSVnrbJj3sdruZAdtFfrMI8qhrQk+onoVMXUf4DNYypuwPWQ0+a6vVSrkyzGK5VVcHK75AK8PGxkawPCi7YgUZaJ1VdktZKmBsHaWFlNbfTqcT6kdFKQBfRvM8eK59Nsna8vJyeG6WY3NzM5SBdUpryv3793Hr1i0AwDvvvANgwkzdv38/WC1UPp0sD6+sw9XV1dLBr2oJy3LFGQwG4XkpenH37t3Q51h2tuHly5dx/fr18LzAxOqyv7+fcu3r9XqpsVk2+ZyHPJc9z+3NY108YQU7RtWKOK1bRR7y7plXT571kVAWMC/ZYJ4VbNGiE7PKDc/yW9P8jq4r1kqvctlkmemacnJyEn7fulC12+3UHKnWURvUfHp6mrCaalny3G+yUMTdTOcODZq2HgUq3+tZeIGkxZnrVaPRCHMM39N5IitpbBY88QXr2qdl0j0B5zwyUWzLTqeTWt/UnZuv6bjyksoT82Z3WEbrntZqtVJzF/vq/v5+KB/3EHz+LDGORY1Pjz0BkkmD2Tc6nU7CK0Wv6u7PfsDyN5vNBMsKJIVC2Ib8TKPRKBT0T2hKgywpbGV+1TPEeolo3WftGb017jyX7rLsrnUZ956xXq+H9lIPKu4p+BrLsb6+Hua8nZ2dRF0MBoPQzspi8x6WBV9eXi7NSHGvpe7J3rzpiaLZ0BNej4+PE/0TSIboWDEOb/zreafIGSMyUhERERERERERERERESUxlxgpT/aSr1mpweXl5VSQvjI9PFVO64t9Hry4Ci+hqWXLPB9UMkHdbjecxMmGqFXGY9msyMMsrIBaDdSKBUysCxrvQwvEhQsXAtNy4cKFxHtq2WPb8ZlVhEF9mPm3ilLw+cpaMtU66zFRwNiaoUwUkGSK+Bxkdm7fvh2YKMZGUWDi5OQklJf1tLOzE6w0rCdaUJrNZmlLplpkrWVLY/EYD8XEwPwfmLQTWagXX3wRV69eTdyL5e31esESxetwOHQT986KvH6r5SXUn9qTc856Jk98RP3N5xFMbsuTx3ZlBWArPMukXrMCY/We+v9vm9jENGVinTUajTAfWFbj5OQksBpkpB49ehQsmfxNZTe8uCn+nvW3V+l1yzhOIzbB+49GI5el5G96fd2yB554jSctbS3yXrL1WWJdFVnrrcrOs/5OTk7C+sr5j204HA5D+axHwvr6emhPzv/KJs5DCMmDF9OosbwAUrHSwIQh2NvbC+VlvfAZdb1lHXriQbNA5yCPuQSSIlN87uPj41DPNi4FSCZKBpKx24yP0ZhtT56b9ylTTt5HhVtsWhDds6iohWWkPO8Ry/LqHG/lu4E0m6RjvCh0TrDro7J6fG622+7uLu7cuQNgEk/PZ2s2m6k91OXLl8MzWyGHe/fuhdfYBzR+r6wEurK1XlsAfgohTdJrY/M1ht9elUEtylIVmcdLH6Q8Fz/bqTQQzDb41tZWmPxYcdzk6T3mnefBQql3616nNDJf6/f7oQE4obPcJycnoSE4qdgFVu/lbdLUFaMs2HnVVcN2DFW442Fgc3MzHBB4YNCDA91b2F7cZNTr9QR1zvvbAaBX1l1R5C1IGliskzCfTScQYKLM93//93949913ASDkjGJb1uv1cN9Lly4BAK5cuYIrV64AQCogczQapQIUz4NuYGzQrrp4PHz4MPFsw+EwtNMnPvEJAMAXvvAFAOODFDd9PECxD96/fz81cdbr9fAcmk9Mr2XgHX5swLC6ifK6srKSWjT5PCpIw/6rQfH2oH5ycpJyJ9X+X2bB8pScirgXAv6ml9cim9M8IY5ZMIv4g53v2V8rlUrKtUM3q/MWMrFg+3qbIZ2zdaMKjMcUxxfLpAYaznE0VLCPXrhwIaU8NhwOE2NUr9MYAtmv1R2JfVw3d3a8qACFrYt6vZ5aWzzFNXX9zdpUzNoX7UHKc+VRoR22Ga9sy1arlQiWByYH3rW1tVBXfF4tk5cLbJr8RESeAqp17Ws2m6l+onsKHvo5n6sB0ZbJO6jPMs41L6TnvserFZk6PT1N9XU1mtuDroYV2LW73W6HcvL5VSGzjDHWcwm0oQqqgOlt2j0FzCyFwm63m1KRUwOyvWoeraLwjADWpVjdI1kOFdmicVb3sLqfAoDnnnsOAPDUU0+l+kCv1wvftfn7dG9RFJ4IhG2vVquVOlirgci6/XW73VTeTRUg8g5ZVoWaZVPDUh6ia19ERERERERERERERERJTO3a5wVjaU4EBojyZMhT7PLycqAOaXnxGBU98S9CAlitfpbyXVpaCs+mLAxhWQSVW7TMi2apV6uHtRJ6bFVR0OoDJC04wORkfXx8HOqRFtl2ux1c22jRIyN1+fLlwMxYoYW1tbVgIdBgR9aVZSaKZof2UK1WU7mi1MpFa5ZKftOC+cEHHwAAfvWrXwEYC0xQXIL9k1hdXQ3lffrpp8OV9cLfVkndsowUoSyrjhlgzEjxb+LixYv41Kc+BQD40pe+BAD44he/CGDcTnQJYbv++te/DuVlXbB+NjY2Ui4LapEtO9byMoazP6hlUuX4NUUAkGTINKBf6+fg4CDVl9Ttahb5aUWe2ESW4AVhrdSe2ITn2ufJn9t7LtKtz/6mihjQa4AWzaWlJXzyk58EMJk7yH7ev38/5dJT1jXnPKj1V5kZYGJZBZCS7z08PAzzAuc+ZWhtkDvngtXV1VBOtS7bwG61jpZleHmPvHrSvqeucdayroyUXd/UAyNP5j2vrxaFt67ZeUf7hu4XOEdz78Axvr6+HhgOuza12203b5Zl2RY9njz583q9Hn7Xk+pXiX4gyUhZga5KJZ2+ZRaoRd4KkCj7ZFObNJvN1DzO8be9
vZ1yjffSh6isud1bsX7KupV664DtF6PRKJGrkfDc9oBxm1nGQt3KPPYpS3Z/Gug+2/YR1uXm5mZCEIu/yX0A52/ug3T+tHlGL1++HDxyyJbq/tZ6P52enpZmd3VsWyEYHaN5cvjWe0y9hey+/Pj4OBXucHh4GP5W9pVlKsKERkYqIiIiIiIiIiIiIiKiJGYWm1BLkwb5Wp9MWiZHo1FCGABIClIoK8T7E/NmpryEcHwOvsZTurIwXvJD/m2tit1uNxWguby8nPL5zPLLLQJaedRyZaFSljx9azwOrclsr48++igwh2Rq+L8mrlQ2wTJRKqhR1vKn1jgVlwCSwanar4CxdYHxTxSWICP13nvvhTgiWix4r52dHTzzzDMAEMQbLl26FBgUm/la2dNpoNnDgWSCRpad7Xr9+vUQE/X5z38eAHDt2rXwPLSwv/HGGwCAn/3sZwDGjBzrndb0er0e2sX2vVmtZTY2iHVVrVZTFvtutxusZDZAVS1KnnU0L1jUxkCUFZvwLOheEkY7TrMk0Xm18SZqfbNWchUPmPecl2eJtwzrhQsXwph4++23AQA/+MEPwjN++ctfBgB85StfSbz3zjvvpOYAZRnnUSZaGmu1WiJwGpisNdvb24m4XSCZnJJzHcdPrVZzZZeBsUWd849aoy3ToXNC2fGkjISNwVDruFePto8qQ2WtucpaWXZaRWDyJP7LQtdIu05UKpMk5KyDg4ODVGwUsbKyEuYzMh2cxxuNRkr8wGunvFijojiPOfTkti1roRZwa/n2ZLpVkGSe0BhT6xVEeJLl7XY7FafG/cLOzk5oH7tfUOZC64LMh53/lUkoAtar17Y6z9pxpgmGbayYJrC27aheB5quxfZ1ZSnLeulo3LCNLeK9NGUL2aRmsxmem/sfTa3y05/+FEByr8tnZXuxTQ8PD0Ndce+obFtZLx3uOzXOk7/J69raWjgr6DNarzEVQ7NxVpzXV1dXw56O84quCVYmXsUp8hAZqYiIiIiIiIiIiIiIiJKYCyPlqX7xFEerH0/EZ2dnwXrE0yItGXqaVcvwImKkgLR1VpPZWsZoOByGk6z1JW232+HEzJM+y69WB89nm1ctiMeIAAAgAElEQVRlB8payOiHrPK9ViVNLY2aUNgmGaZf+oMHDwKryPurmh1fU2U/1odV3lGFr6LQtqDlwVollpeXw2/QgnD37t0gbU4mikp9Kt2palwA8Oyzz+LZZ58N5QPGVjRaOSzLMo2ke54ljP9Xq9WU//+LL76Il156CcBYSQeYWIHeffdd/PCHPwQAfO973wMA/OIXvwAwtpbxHiyvWnKsXLSmLSgKtUaxPrwYIU2kx3Jb2VIvgbMdoxrz4flLe/EgZfy2iyTk1cSMamXPGreePLUXk6J1OU/VvjyoX7mNiVleXg4KTpzHOaaOj4/D87JPcl7Xey3q+TluTk5OUrF5ai3nnKWy2tYfnnPe7du3E3M6gIS0OsvHeWhzczNlvdcYmLJl13Gg8bd6VcZA1xAbP6LeFna8eInVVfXPMtbKjEybKkH3CZaZ0lgVjRW17ASfZ3NzM6w/NoZ1aWkpFR+hjNTjGk9AmoWoVCquCimQlDO3kvRZamjznCO0/3jy0/xfFX+B8Rpl9wfKQtkUKhr7asfh3t5eGIuM+2UfKMoMEGQ6BoNBKl5c696q0ql0th176vngxfHYdtOxpGOO//PvotB+ZD1a+DytVivESLEdrl27FvoP65z1fHR0FDx4yExxnms2m7h582aiTNvb2+EelkXW5ygK7jFVOt3Wme6vlZGyDJoyVMpOAcm5hq+x/r14Rh0DRfauM4tNeDKfOkHbnD4ffvhhWIzZ4Ozg1Wo1tRnzfnNeE4cNKtRBYbPeNxqNVMC8Ni5fY6dkR+31eqns03o49KSPyy5WGmjL57X06MrKSooC3d/fD4PCUtadTieRewWYULkPHjwIbn56oOLkyo6th7iyh0PdDPF+Voa9UqmEyZUT561bt1IHKAaVdzqdMFD0AMUrXfr08GGDg3Vy1TwRReDJDvM1jhOVL+WzXb9+PbQxf5+Hxe9973t47bXXAAD/8z//A2DSXltbW6FNNE8Y28XmStPnKQp1MbCBwmybo6OjsCCqpLFKUgNJ1yYr0a75R1gXmjuGfYP9n5glmNwaiPKk3vUglZciwh6atC94+aMW5eLnwdZVt9sNGxqWk/L7BwcHIb0AXUr52bW1tVAmdfueZxlUtpv9xlsE2W942Ov1eqFf2lQVx8fHQajFHqgajUaQRFdXY7sR1nxwZeWNtW/YTZ/KTnsS1Pb31XXNuq6reI+3TthDpH5vWnekLIMry8t61LxfduOsub24aeccoAYv6w6sByl1u7LPOE+osUXHgF1H2L66wctLB+G5/84Dunm140jnXutSubOzE4ypbAv2n1qtlhINUoMt5w/uKx49ehTWAu5LdHyVKS8NP95hidfj4+OUFLa6jttUGvr7XliIPSzpvscestTltij0YG37j+bGY3txnXzuuefw/PPPJz7Hufro6Ai//OUvAUz2rq+//jqA8YGK5eQ+r16vh7XWjs9pxLfYBzw3Za0z66Kn+yR7oFpdXQ1zhT3Ia846wkuRomeaIv0uuvZFRERERERERERERESUxMyufQBS1uP19fXAWNDqrEGwmjBUv9dqtVyJyqzfm8bC6UkS20zeGtCrLmY8AfOqVksr/amJz3h/DcC21qpZrLX8baWLrTucWu15cn/06FGKSmfb9Pv9YD2yVlGVpqU1aWdnJ1iraKFSF0gVnigCLQfLYOXVe71eeA4GjL/77rtBZILZvGndqlar4dnIPtF16dq1a8Hqom4ill0pK4up8FwxWBZ1S7LUfLvdTo0ZsgCvvfZaYKJIk6t8KVktWuTX19dTUsNeItiyZVJ3RSsb6wXiAmlREh0LmiQVSPZjjjFlhm1yXxWtmca1TxmmPEZKr9ZlSdmkPAY67zVC+848Leja3lZ85PDwMIhMcAxRWELdz+gSwj66vLyccsn0koXOMuepFDD7seeeoXL7APDMM8+kLNQsx61bt8KcR8ZX3UZoIeVY0mBsK0ShzEhRqCuYJwIBZMs5e4kkCStAwfGm1npPxpnrIcfUcDgs7aLtJbG20GT3GuztufQBYxcyzo2cF3hvL2F3r9fL7GuziE14yEuHoO7MluVQF0xrRVehFm8enQdYt41GI+EGDkzGztbWVop9Wl9fTzAwwKQPapJhrtPKQtm9x9HRUcptddokw2S2lGGy/ULdBT3my87nyu5aN2JNEq2MrhVEULaxLCOl7pFWrERDG3hfdUHmnMU0KmyXw8PDUF7uWXl9++23w/Oz37Xb7VAvLKc+S9k5nfWv8ud5SbOVAWSdqpAEr3l7ddt2OgdYL7Ver1fIsyAyUhERERERERERERERESWRy0jlSeXqe5ZdWV1dDQH7PNnRujEYDMLfPAnzVK3W1iKBlNMIUehzWyZKgxDtqVUD2nm6VQsirQBWwvHRo0ehfMpMeRLq00J9Z/m8lpnSOC5ak3Z3d4OFiFcNQqS1gM+
t5bBMzeHhYbACkYVUIQpaBopCn99avGghODo6CvFPtCD/+te/DkwULV2EWmaUiQLG7A3rR6Vmrb+9Wq7KxkB4sBYuZT7Zrp1OJ1hqybyRBXjzzTdDm7GeyEK98MILoXxkC5eWlkJZrAVoGkZK/ZmtPzjLtLKyEvoc5wW1QFn/ZLX82Xupb7knKJGXKLcIiopNeEmNPZl0+wweM+UxUnnz3yLipTRYmvfv9XqBAeVrjE3RNmP/07icxwWNO7ECP/V6PdEHgbFVnWPCBpMPBoMwn3DuUGZKLZjAeH6z1lm1qJdlDZSZzep7GpPsldNa9LWdPPEDa4FVCWPLCA0Gg9IJN/PSCWiMprWsK5unsVHAuA+StbfxnicnJynmQdvCq895IG++UaEdG1PHz3veJGxT3asoI5U1R0xTJq7VGmetaUZ4tUJFygDa5OkqHsGrxsVyTfNYVC958zSeBbrXsvHIGkPpsSD2GVSy3MaxNRqN1Gte0lhds8rOk/rbNt2Cioa9//77iWfUNDFsUzJTe3t7YZzQy4X3evDgAW7duhXKB4y9ZOw8wvJqnHRZ6DzuseuejoH1QOD4USbQMlMaA8qrtpO3jyiy3kZGKiIiIiIiIiIiIiIioiQKxUjl+eWPRqOUT2m9Xg8nX4Inwl6vl5I3VKvYtBbkorAWYb0HT8AnJyepU7daD/jcqrRGi45VBVpeXg5MjUrAW+v9LMyU1r89pWtda5wJMLZO0LLH51dmis9NC5P6/1vFlsFg4CYZBMZMEH+zKDTmhXVkrTD3798P1hdaTj788MNg9WK90KJ25cqVwERRgYsM1ebmZrBKqLSoJmbT3+73+6UTbno+v7YfNJvNhLWP5aSFnEqEtJQfHR0F6wvjBqisdvPmzYTaDpCMgbCWwGn87j3/ceI8didLDtlLbkvofJPH6swqDeypjOnzWlbAY8XyrN55yXe9xKePQ7WP0OfNUjNS6/Cs8RrTzHnquWAlpZUltfPr8vJyGCc24abG0lgL+q1btxLJx/nbNqk8raLdbre0dVaZd1XA1bKtrq66SaptrJOObds+2netJ4WX+qOIxP958Ma0WqDtmjEYDFKxHpzLLl68GF7js6nyomW1NJ2AfYa8uWYa6Dxn4701Jo19Vstored8T/v4LN4DedBYEpuMmmXy1A+73W5K+Y5r1f7+fkqFT+PWrLKn54mg+5kyjJSmSPHWC70qvHlcx4FNs6HP6zG+RbwTioLf1ThFT02Yc5YqkHJMU72P9fOJT3witBfr4/bt2+H+vBfjryuVSpgHCV0Xy5ZJFZgJy5JrgvIi85rnmaVqf1apVOuHV2USi/S7mcUmdNFnwZeWlkJH4wZaN6maEwaYVMC0gYXTPred6PibXr4CzY1i8yWdnZ2FAcX36MoETMrnBb96rgZly+5R4nYSUAlJDeDXQFJg4j7x6NGjMIg40Dgpnp6epjYJ3W43vG9zLnW73VI5IIDJZqVer4e64j24yblz504Y9Co7b9uJC/C1a9fCAerpp59OlLvZbCYWB2C8qHsHKCCd9b0ItF2zhAwqlUriwMrnoagHXa34PKurq0HY5ZlnngEwcVvc2dkJ9ajuT9oPgeRmeRGucCrE4LlqeHWQJc6gz1p00p5mw+G5IuXJn3sLqXfQtgc8T4giL4/UNGIT80wbMU/MMsd7BznWN/u3bmQ0nwjnRM4L6tLNe3ATwnvt7e2FOUYPT7yHNa41m83SYjQakO7lFgOSLrE6v1p3PN1wWLcldcG09aOHKyuDra5NReHND7Zsuj5onbE+aOTjAXhrayvM7VY45/j4OCVrrwbaLAn2WWH7su4v2L9UFITvqUCXzYdjDYi8h31tHtCcguxDKvxh39P65ufs5zudTupgr7CHJc9te9ocZlzXFXZ9UvcwL0egChbZ94ockLy5fRb5en5e93LsMzoXcN9Go/g777wTPs/npov92tpa2C+w3VjnvA8wadtWqxXKoPtf1k/ZNYbPr/Lx7FscG7pn0QOj3YupYI51M1V3S5veQd1ZPVGQInNedO2LiIiIiIiIiIiIiIgoicLmpSLCE4SetK3Lg+dSN+/AzzxoOay1QCU37XvqusbTsQYfZyVY1RMwUa1WU58npmGklMFQOVW9qiCGuv158tLAmFGz4hGabdwmrVN3RVoIlIUqa31R+pq/QasIaeb3338/CEuQsel2u6FdyAqSqbl27VpgojS41v4OLZoquOGxN9PCY28IFfLQoGm2hWXb1D2Tboq03DYajdCn1UqowfVZzzUN8uYBbzwVcX2zVy+Rdt7np0VRsQm9egIUfJY8V0VvLph3ebQci8bj+B3NVG/rSF2AOWcoI8U5jvMEWSW1cvJelALu9/thHuSc02q1wnNwrtG2LysVnic2QWQxmJa5UiEZy2J4oib6O3mJqMuKTRB5660mN+drtVotWKutt8Ta2lrKpc+bs/MEBKYVo7FlIrx6tGJWKn9O0PLtJRXnvYbDYcp7xnPtm6UsyhxxrVDZdr5nRaaOj49d0Ra9KnS+VAEhvma9lKZ1K+V+5jwXbc+9vAjrROjc47mXe+sXr2X3RCpWxnoie6ljip+jxPmDBw9CSIANX1hbWwt1xb0R27jZbCYSRgPjNrZMlM7FZcExriI3Nhm5usQqM2XdmZWhss+icuZ2LOm45P3V1a9I6p7ISEVERERERERERERERJRE6Rgpz/JiT93eyV1P+UUsKXn3mge807OeVD2rn5XZ5UlVfSitRRBIJ4pU9mYeifU0ANSzOvIZvSSP6p8MTE7iq6urwVpAa6smzrP+0F4gqlrkrSXuPLB+VIL8wYMHABAkiu/duxfipfiZer0erDRkaCh3fPXq1WB9tnLsaonj9eTkJOHfrmUDFiPz3O/3U9LFp6enoT9aidV2ux3ah1dNjKgB6bzasswSn0d4bIrXx+d5/0XASjPra57YRB5LpWMxi33T38mzZOYJUcwLRdrmcbFa50FZQtuPdR4nU6FtyO9af/idnZ2UVZ3tvLu7G9qT7PD9+/dT1krOPRp/URQ6P1tLuYrSeKxKluVb2SovYba3XmQlg54G3hxpmZperxf+JjRmQmOjgEm7AZN5X9cjO2cDvsiE9/q08JhDtYLzNZaT9aLsE9ckG2+nMZPeOjSPMrD+9Llt3J0KFSkLkCVW1Gg0XMEGvmdf0zioWb2UeO88RkrZJ+93vL1Z1tjIEjzKE0Equ/djPSszzDmD846OaaLf74d9EkW5WO5Lly6lvMY43kajUegX6jVh+/C0LDUw2Ydp/VlGUxkjFQjSmCi+xs/bPZTHSuft2zWmr4gX0tRiE2Vc/fRzZYOlpwmuLgvPJclbmGxQmwZEWnpTJ6MsdZG8ZygDXSA1aFTfq9fr4Zm8AE51feHnuUmwKiedTid16NAs4V4Ok2kFNPr9fjjA8SBFwYXd3d2wUVKxD04EV69eBTChrHd2dlJqT+qGaJWjVHnLG0yzKFhZeG5wuhGwE6aWl6+x7TTQ3LpdaH+c16KVhXm6pT1ueAuwupvYQ5OqStmD1HA4LFTH3mHpN7
HuHgc8pS8rGqSLvh6o7FytCk80tFhX3mq1msoHeHh4GFyKbQ6uZrM5tSqXHsJsn/JETbRMFrpB8dz+7FytuWDy8ptNA3s/3ajbtWl5eTmV91DV7Oy8rMYva9AD8ue6RewvVJxKXe9VkAuYHJpqtVoqfxShht2skIBZoX3b9gl1mbJ9o1KpJA4tgC8oZOdLPQx4AiCztolnePMEH4r8XhGXaz0YeffP+n4Z6Pxm+7GqP1qX1sPDw/CadU8+OztLGZXV3ZT314OF51qvz1IGnvuz56rtHXSsK6Ae8q0RQPutbRc1IHtKpUUQXfsiIiIiIiIiIiIiIiJKYmb587JQN4SPG/QkbCWD1ZqkAbGAn6Xa+55njSjK6OVBrUPWWqDW8SIB8mp9t66LPK2rXCTd/5SRskGA6ppQFCowQVqa1l/+r2IWtOZtbGwEBkqZKGBsrbHWa/6OyuYqe5Mlc16tVqeWoVbkuZgSS0tLiUB0ICkYwjazFtB+v5/4m/fOC6YtW6aP61ieFloXtn68oHtlpe340r6TZxEvY938bavvecBawgm1PqoogZ2rVc6c8xqFDdTljPOOzmucM8iaq0dAWalwz03Iusfq+NXy5wXGWyZIWQLbV/v9fkq+X6/TuqJrn85yHQOSUutMzcGruvTZHGDqYu6J6BCLlj0nvP2CvsY6Zn8bDoehfJo/iu/lCRnMA6xHde2zzEalUnFzBdq+p3sK+5rOpWU9k8pA2ZKsNs8TAPKQ1/ez2mOebWS9jIB0ndfr9bAn03az+csoRFGr1UL/5B7Dk1lnHWoqgXl4TWh/KCM+5bFUOod5aSB4te7MnreRulQXcV2MjFRERERERERERERERERJ5JrM7MnTwzQxTEXu+6SRFzdlLXVqXSkiV3uer/m0rIC2hYo1AMmTOE/neZLNak2ipUAZKlrNVCbSBv1pQKoNJj4PDHI8ODgIlmD695KJGo1GiYzVwDhQksnmyESp3LH1z/divPKkzuchzJAHj61Ui4iNaatUKq7lGEj6tBParvOUAv5thMdEAUkG2pPztTEG2lfKxEoB8xGj+W3GeWyDteSrqIxdh5aXl1MiAEyToPGntOZq3KxlRoDJ3FgURWTwvZgnjXXNYuey7k9o/7Trle270yIrmHw0GrlrDOdtzu0q1mTrW5PwejEQHou3aORJlrN92EfOzs5SIhPazrYfLxJeMH7WZ3Tv44k5ZK0tHiPvYdo9oifi4+3NimDen5sVymzaRMeacJzjZjQapZhnfr/T6aT2ijp32NhPL7bwPA+rPHhxwd58k+fJYceGxnl6V7sPzhIKKVOmyEhFRERERERERERERESURCEn7vPimvIU/GbFk7aSe9ZBexIvahVdFPJ8c9UiZBVM9OTuMVI2bkrvZZV61CJPq6KqqVjLyXkgI7W/vx/iD8gY0aKgPryUxr18+XKIb7CJN0ejUYo106S3lok6T/Vpnn0zzwKiv2XfU19hwvP99SxJkYlKo0g9aUJE9U23r3kW3SwpXqCYxPm8YyN+G2D7rzcHqzcBx7myR8B43HAO4z0Zt7K5uZmKMdK0BJaZmkYSWOWN7RhWNtRajj0FtLyx7bFbyg5lxfTO0vd0LrXMuTJSmt7ByoFrLKiNkdJYCC/GNIsJXtTc58U5aqywTTsCTBg323+VkVq0mmdeTIjHMHlrk1enRdigeZap7P1/E+ZU3Xeqt4PCU2JeXl4On7NpaIbDYeq1vHWrWq3OtR51zstLnOylavHWUD6DncN0zvE8eLLSBhWRPgdKiE0UdfPTz85yz4/b5q7IYSXrc3n3mscA1nq099Nn9Nz+yhyksjoukMxvYiVPG43G1K59h4eHqdwuvG+r1QqBlTw8XbhwIQQn2xwveqDTAxTfswNMy7qow0dRF0/7u3nuP+cdyrLeWyQ+buP5PKiLk5U/9/J4qJssN0TnBbHq1SJrHEdkI+9ApW2R5RajC7Dd1OpcQ2igNn9bBQbKHqa8/DO2LN4Gwgv4L+pC6rkOZrnMTHOQ0g1QlvtMpVJJiEwA481flvhCr9dLHVy9lBuPS+rcg9eGWneaY5HPZecGr00WNQ8UcePTv8vW6aIPTo/z3k8KXj3nhR8QKhxm5ykNDeAYytvHz3v86EEqy0VU04d4bqPefpSvcQx6svt5rrN6ECtChETXvoiIiIiIiIiIiIiIiJKo/Dae3CMiIiIiIiIiIiIiIhaJyEhFRERERERERERERESURDxIRURERERERERERERElEQ8SEVERERERERERERERJREPEhFRERERERERERERESURDxIRURERERERERERERElEQ8SEVERERERERERERERJREPEhFRERERERERERERESURDxIRURERERERERERERElEQt781XX311BADnJe2tVCqp/5eWkme04XAIAOh0Ojg8PAQAnJycAADq9ToA4OLFi7h8+TIAYHV1FQBwdnaGo6MjAMDx8TEAoNfrpX771VdfTT5EBr75zW+GwrBcvJ6dnYX/7Xvea/x8HpaWlsIz6pV/23rSz//FX/xFoTK98sorI72/B68N9fP8W8vG8tlnrVar4TV+5uzsLHzX6w/Et771rUJl+ta3vnVupmitR69uPXjtat8ri1deeaVQmf78z/88tFO1WgUA1Gq11JX1zOtgMMDp6SkAYG9vDwCwu7sLALh79y4++ugjAMCDBw8AIIyvs7MzLC8vAwB2dnYAANeuXcPNmzcBAM8991x4DQC2t7exsrICAPjiF79YqEycI86DV7dZ9Z3Vrlnf17GZhyLtxLE0DfL6XdZnZkmIXnQsFW0jouw8Ms1nsn5vnmXK619lv1P0+x6KlilvHvf6Oj9Xq9XC/MHXuEaenJyg3+8DQJhzOMaXl5fDHNPtdsPnB4NB4l5cn/kbAPCNb3yjUJn+5m/+ZgQA7XYb7XY78ftc41dWVsI81Ww2w7NybdFnA8bz2/7+PoDJfMj/j46O0Ol0ACDMmYPBINWuupaxXv76r/96ruOpTD8rOocVube+v6g54kmiSJnyxhL3ooPBILT9+vo6AGBjYyP0s1/96lcAgDt37oT3XnrpJQCT9fTg4AAAcO/evdR+ttFohH7mtbc8a6E2+qu/+qsRMO6zHIvePoKv8VqpVFJ7Vo7x09PTME64v+Z+++DgIOwlWM6jo6PwOR1fvDfr+8033yzV76Y5Y1jo/t3uzXVPdd78moW8doqMVEREREREREREREREREnkMlLTIs8KcHp6Gk60tJTRCtVut7G2tgYAaLVaAMYMFr/Lq54ay1oH1UJgWRh9z2OdpmUzirAmlpmaF8qe9LXcrG9r4VBrYd5JvmzbePCYQO831OLAv7NYPyDNJnrs4ixsgQdtf7WI6lWfX61HtHZx7NAS++jRo/A3LUm0Rtfr9WDp3dzcBDBmfS9dugQA4cr3VldXE1bnIshiIbPKnsfw6mdtG3r3t6yovX+R5yoDb7x7r3l9yfbBvLlgns9cBtP+7rzHyawoMhdPgyLfnWVtKsJgqNWXc0a9Xg/9i2Of1uKTk5PweTJCZIIaj
Ub4nL0CkzVYrd9FvDDOK5/n4WCZ+Wq1mhpjtHzX63X383nIYgTKMkHnIW+eyps/zpsPbd3p/qXIM0SkMRqNwh5H2V32JfZ1rr2tVit8jnvWRqMR7sd76Ro6Tw8E3StkebToe3msDffe/X4/jHkyudxjHB4eBkZKvcJYH2SKOS71ORYFbz+gXlG88m9v3bUs4TzGf26pp3XR0Idl52Kla+Ow0jlhb2xshIMU73V8fBwayh6ozps8PdgNmvf8HvIOTXmT4CxuLov6jreJs5tADjRgMlmQsq5Wq6E9tfPmbXrLwlt87OFNF3VdnO3kQqhLYhblbu9LzGOw6QbIO5zyyt/SscPJjS4sjx49ClfS7pzgiEajEcbTxYsXAYwPTzxAXbhwAcDEraHZbIbfnCey+gPr2TOQ2IXCcyvN6xtE3oajKLx7epO35yLM/7MOhnpw1s8UcW2cJzw3XyB7sZnn5nMebcT7ZKHsIajs98u4rpaB7WfAZGxwXq7VagnXeWDi3jscDrGxsQEA2NraAjA5SHW7XffzNL7w/rzqBqUsPHd2vXobQ/tbaiCyByh7VXiuPjqHzHI4tPfL+p+w7cm9jXeg8/ZT0+5fPm4oa+BdxG/rvKNurNrfgcmY4rgA/INUmZCPaeC5o3pXe3jQAyP3dWpw8Q5QwPjw5B2k+F3dI+rzlUFRV2rPKJ7lrjgcDl1jDTCeV/ie7jtmNcBG176IiIiIiIiIiIiIiIiSmJmH8yyZemrkKVEtX7Sc01JGi/iFCxeCtYyfGQwG4R5KIU4LtXDnWcjKiBjkCVGoFU/fy7Jez8vSW+Q+2k58Dlober1eImgSmFhkhsNhuL9aJWzA8yxQy5tlSZSx4XsqjMHvWibNs2Rqm3uWpCzr0jTtlGdRymuLTqcTWCcyURSb2NvbC5Ykjg8yvKurqwkmCgCuXLkSXqNLH9u1UqkkhFzKwGMVPDZF3fEss8S+pAwO29Bj8/Sedm7whGOmhceAeXNdniUyK/jVgxcQuwhXxSx4TJ/H+GWxZY/bMj5tkL7nXlX22RflRpi1PlSrVZcp4vpKN1+662xsbITxzivvubu7GwRquN6qKIRa4IHx+JwHY+2xslmuSgqyAKenpykBDM+1Ow/zWm+L9H1vvbJ7Gk+syVrT9b0sBvm3BfOc77z5m//b36nX66k+zrGhHh+WkdJ9yqIYKc+1z9vHWJydnYV+xjVW3fmsyARZqMPDw5TQm4rXWPe5RfVDj5EFkFmm0WiU8J4Ckuy9nVOHw2FmmxUtU2SkIiIiIiIiIiIiIiIiSmIukWEee2Njo2hR39/fDydJWrxoKbtw4UJCZAIYW+VpJbfWgzyWKAt6UrWnen3P88E+79SvV43nstanwWCQisWlMZ0AACAASURBVPfS66IsGh4DlyUxe3Z2FmRqrQDI0dFRyk9WfU/nEWCpdWzbSS0i1udXmQnLajSbzVAGWpTUommDNM8TF5i2TN5v6W9aC8vh4WFK9pz/Hx4ehrZjWWhR3t7eDkzUU089BQC4fPlyGG8adA4kx9o0yLIiqkXJq1uWl23X6/VclgpIxsCpNY735efzYhrzUCT2wJt3zhM0yWKen2SsgMKrL7Wge8yv7cN5QdZPCnbOq1TSUsDaNt7ns7AokQ2N2+Fv8Hnq9XqYu/ja8fExHj58CGBiTeYccPnyZVy9ehXAZJx/+OGHAIAPPvggMFKcF9fW1oKnCNtXvUNsXMQ8oCySZaYURcQmvJhDDzqfTNuO57FCViRjMBgkPD6A9PymZeF+xBtrXhyl/d1FYFEM7JOAF2PdaDRSY0/ZG77GMchrpVJxY36z2LBpUCRGyiufjlsVoQHGc4eVOOdVGSndj1uBDr3OU2zCY6z5mo4lloVlq1aroV04r3E+VK8bXnXPPS3jGxmpiIiIiIiIiIiIiIiIkpj6+Oid2PTUyBMefSsZ23F4eBiYDqoIXblyJfxvT4unp6cJZRv9vWksIOpb7cXQAGMrg/VD9+Jr9PetNUL9N22Z1PKvMpT83iyxYEUsRmq9429Zee1WqxWYKLaT+tbzc3xNGbs8RqcovFg2Qi2TViq82+0Gy4S1MjQajcCC0kLBa7PZTPnbs1wA3NiAsmXK83EmhsNhYJhoDdrf30+o9PE1YNxuLCetMLQo7+zsBCaKY2xnZye8T2uNKjVa5b9p4DFT1tKsVlb+Ptut1+uFOmBbcHzo97w6tG0ybUyHZ/HV32M5dO7IitvQ57J+3f1+PzUHKBNUlsGaFsqCKDOYZTn3YnUIZacXjfPmO6+PeBZJYNxebENvLpuV2TwPypTZuYvPtby8HPoXrcQPHjwITDXLyfF+8+ZNbG9vA0BI3P3OO+8AAG7duhXKqfMD12eb2JZjclZ4rJ9tJ68P6ZjLUu3LSrhJ2LlpHjGUHrLiUqz1XBl0lskyjvq3V85FKUY+jns+TnjMnfX+qdfrqXKyzZrNZvi8erkA4/bw1Gftb89Sh57Euce6WtXffr+f8jjyJM7tVZPvqtS5xmtq2XRdnAVejCB/S9dRqzLI8ba6uhr2ddzrcN+n+3E7PrVM9lnOw1zEJryDDScKK9d8enoaskJz8uZmb21tLbga6CRuM63PknOJDd1oNFKDgRvLVquV+JufsQcvXQjsQYqN0+12QyfUXB02bwc/0+v1FpZTyk7Go9EoDCy2E59ne3s7tA9lsu/fvw9g3HHZibUubCecx8RbrVZTG1T2Az10cKDU6/VUXiV1kdMNCTBxb9PBp4GJdhOttHZZF0zPTVSpamDc/laS+NGjRymXPs3Fxv7LstB176mnnsLTTz8NYDLWtre3Qzmtq6TmeCuL8xZzG/SpCxbf4wSngjTWJUFFKnQs24lWr/NyldX5R38b8OcMHRv2IKVuCXaR08OVBqPzOsu4yvqubvy8QGQ+I79fr9fDhpvw8vDlLUTzEGPIu6/OHXy2brcb6pnjjPdqt9updvXcHBctMKEuQTZFSL1eD32H7nz3798Prz333HMAgBdffBEA8Oyzz4a196233gIA/OIXvwAw7m83b94MnwPGB6msvDLD4TB1aJ4FOp48t3ObokLXARtMrldvP5JliJhmLOW58ek+wI5r3ZiyjjnmqtVqat5QFz+791AX1UXLbv+2wqsvzz2ZbXV8fBy+Y0MENEyiyDwxDfL2DwpvD+odoHi1Ln1ezig1pNkDlOd2PAs8w6XNk9fpdMKelc+romg0HvGswe/v7e2lcu31er1ckiS69kVERERERERERERERCwAc2Gk7Cmu3++Hk6+1pFer1cBwXLt2DcA4IJbv6ekfGJ+medK3jMQ0YhN5SQBV7psWBzIXy8vLKclLpVezXHfU1WzaLOxFkfc9LS/r4PT0NJzq2T4apMz24ffeffddAGMLKNuHbn/KCmTJlZeBWuoIto+13AETS8XBwUHCUgtMLLf7+/spOU+V/qQMP5md5eXllEVbUbbveW4Z1op1cnISnpGW5IcPHyZcY4EJQ1CpVAIzwHFF9unpp58ObC9ZqvX19dB/LQNxfHw8NSOlZbGWUrWesj5XV1fD
3+xzRKfTCfMGy6sMnLXWr6+vp8aW1muZdspzR1XXN2vV6nQ6qXQBakG3z6AuwNZtblbWKa9MWe95jNTR0VGYH2y/WFlZCXXNefE8i/jjcvfjWK3VaqFfaBoOzge0tqqruRVaUFbB9u+8gP9p4DG4nOvYp4bDYRgbdNU7Pj4O4/x3fud3AACf/OQnQ7nffPNNAMAPf/hDAGORCWDMXvFzZLJGo1GYN/k77A+NRmNq152ioirqNWHnXBWYsGu2JzZRpG3mNdY8t1jrWXBwcJDykiDa7XYqETLnxdXV1dAPlKWzYQRPmpEqIgZ2noBT1ucf19xRq9VSYiWcA6rVaqhzuxepVqu5qSLmgbyku4SKm+ncxX7Hq7JQWYxUp9Nx3Z/t8+hYnIWR8uYAXnV/BIz3q9wTqUsfMGahyLAzxQvHYK/XSyUZ1t/yBDSKIDJSERERERERERERERERJVHavOSd2OyprdfrBUsmrX+0zly6dAnPPPMMAOD69esAJqzGo0ePUqdFlWOeh/VPLdV5EuR5suQacG2fzVoz1HqgVnNaNGiN9ixq84QXw9TpdMKpnid9xtS88MILQTb3/fffBzCRzb13717K6t5sNsNzz8NH2JOxtvECy8vLwZrM5xgOh6EP3bt3L/Hcd+7cCa9ZRkoD62nJ0bgpK5c+TR/0EizaNAHHx8eBHWTb7O3thfHEccQ+uLKyEqzoVur86tWrge3lGGPdsczApO1V7rQsNC7Gs5RaS3ar1QrPzbrUhM9WkpX/9/v9lDxwrVYL7WSt0dVqtVQ/9GKrrPR3r9fLFY2wlmFPnEKFGqyUs5cQfFHwGCkvOJ5XPqOyzl4g/KLiPPPgSWizfBzn9+7dC3MAP8dx88wzzwTfepaX7Iy2uTeO5wGVOs8S8jg4OAhMFOeEjY2NwER97nOfAzBh2X7wgx/gO9/5DoBJbBTL+9nPfhaf+cxnEp9/7733UvfXPjtLjFQRwRRP8pjQdvXEKfi9vPgR+9vKlpcth/5t41KUBWA97u/vp+Zv9XKhRZ19kB4Gyt7z97y0MHa+ehzw2CQVSLGWfj6bymirsMo84uDLPLvtk17qG9ZzpVJJCF0Bk/GpXkl5yctngRe/QygjauPvNdbVrqcHBwcprw/20W63mxI+03FpWblWqzVXRkr7gHocAWO2nONKmSgAuHHjRvCm4j25p1Imi3P88vKyG5dFFGF6Z1bt04mLA0Ndc/jQfJhLly6FAFdu1Fn5nU4nFJiNORwOXZc+vZaBTj6243v5HpQetW4W6vZlVVk8xRS9WuUQnVRmmdgtvAMdB9je3l5oJ5ZFg5Wp2seNB9We9vb2wmZdcxFlKY1NAxXfyFIO0w00n3V1dTWU88aNGwAmm6Hbt2/jvffeAzBxb2H5O51OapFTmpybfHUrLDvZa3+1C6+6gXAM0Aixt7cXFmU7ka+traUOUKS1r1y5EhZlbpRqtVoql4TS/tMepIBs5Uo1hrAeR6NReCa6VPKwp8/oTfKcU3Q8si3sRD6N+y/LYo0pOhfYhWl/fz/lGsHP60GS5WeZNzY2wt/swysrKykhnKKqZPOAJ6rB5+bzbG5uhvbigdgLvH4cgfB2PVD3J/Yjihh99NFHoe242FKY4bOf/Wz4Luc69r/9/f3wnrrzFFGIK1sOVZTla3zmu3fvhvmBz/GpT30KX/ziFwFMXHh//vOfAwD+5V/+Bd///vcBTOaML3zhCwCAr371q8Goefv2bQDjg9Tdu3cTz8V+2W63E+7URZC3FuSpm1UqldT8qoemrDxSXiD+osaL517nzRG8npycpOZvFQjifojGTM7drVYr1BXv3+l0MvP5PA7onklzcgFIrMnsO2wf1oUG/asia9Zebx7IO1irEcP2KTUIWsO3hnlYQ/u8kSei4imtck9xdHSUUuTLyxWlipJWEEpJAE+sTQ21ZctmDVQqxOWFCrGeOU6ef/55AGO3Zu6JuHflXurOnTthLeDvqBKqVdwt2pbRtS8iIiIiIiIiIiIiIqIkCjNS9jTvUbBKv/HkyJMkra7Xrl0LJ0daz2hl2dvbC9Y2nopVCjaP2iwKlTy0Lnd69QLHbb4ptVxY65laYi37oIyXl0dqntnjbd0BE8vDw4cPQ33QGkZXj+vXrwfWhsHKZHMqlUpKmKFWq6UYo1kCRdXSZ91zaEE5OTlJ0O7A2CrCfkXRBTI0zz33XGCpKJzBMn300UeBKmY58qToNe9HUahEN8ungf1AknrmWDg4OEiISwBICExYJooWzUuXLgW2QIPV7W8qk8K+URSelUxd4IBkf2d9bmxshM+xL6nLJi1sas0FxswC/+Zzt9vtlMuRdYUpWx4vB5RKY7OPaIZ7a8nk9/r9fioXHuGJ3uh84gXBLsrybK2sKysrqbmRbXXx4sWUu6hazZVR5nMvIg+NJ1Orz8E+wjHV6XRCADLd4L72ta8BGI8fzgu0ZHJ+OD09Dd8jO6eM1DxYeO1fNtCdVlSWA5gw7l/60pfC2L9z5w6AMRMFAP/2b/8WrNC/93u/BwD4+te/DgB46aWXwn1/+ctfAhjPixxfdC3T8Tmttd1zo8q7l+fmlZeHz3MrVWaqiOhBmbLw+e3arp4smnuHz2EZac7V169fD94gXLfI5lQqldSc561NWr+Lyvmk+RqB8Xji31Yk48qVKyF8g/Mx2c533nknrD+sO08Gf9Esm7dXsfs7zbOo7BmQFjvRz8+bmcpjolTczKYtUEaKexwVmLBrLPvw2dmZ6y6tKT8AJHJzWuGoMmWzba75JdU9luXk3obj5qWXXgr/a85TYDKPv//++6G8ujeyoRtlx09kpCIiIiIiIiIiIiIiIkpiarEJ/V9PjkCSWeJ7ZAmef/75YKXgSZb+iw8ePAgnZbVSzIOJIsrGgHgJAu3zaPBrUWtYlnVuXnKsHotnA/b29/fDSfwTn/gEgIl8bqvVCnECr7/+OoBJO127di34pbINz87OFiL7eXZ2lmLvVIyBz6Ty4LSQ0f+fz7q1tRXEF8jA8TO//vWvAwOn8sg2TmYebOFwOAxlsqzQ7u5uKAtfOzk5CXVKKxCt45cvXw5l4ZUWza2trdA+7A+9Xs9NysdnsbK858ELDreBxScnJynmtdFoBIs3GTX6Nbfb7fBdGwB7dnYW/tb4MlrCLEs4bRybpnXwZNq9+ErPKs3ntGytjk/LdCv7bX22522h9RhrotFohOewgiaXLl0KFnOWSWX7aTlUa+6irORZ8andbjchm89y0HJJJoosfKfTCXPef//3fwOYxEqtra2FMcc5s16vZwrrTFNWZSRpabYJ7QeDQRjnn//85wEAN2/eDGP43//93wEA//RP/wRgLLTz8ssvAwD+8A//EADwu7/7u+Fe9DbQOd4TOyA03rEIPJbEWuqzLPe2r3upS+y663mHzDOOTb/jCdLYK5AUJqAnAedosoo3b94MMXusd/YHnZM5xtSrwZPdnudY0/pjv1RBJ67Hlm3b3t7Gpz71qcR7vO7u7obYZeJJyLd7fdJ6CBD9fj/V/72UNoua5/T+XtJdIBmbx76
iSXetmFun00l4aQFIxN5Zr61msxnmP9umy8vLpWOkPE8LO48fHx8n9kJ8Du7dOJ8zlcPGxkZgoDh/k3G/e/duKrar3W6n0gvovF6kPSMjFRERERERERERERERURJTM1J6AqeVQi2SPBXzpEcf7hdeeCHIFPLUp4wUrc15SQ9nscryRKuKbNbPdDAYuGp61mKSl9xX5Y695IHWkqH1Og+rs2XGRqNRKLvGAtnYKP6/u7uLn/zkJwAmp3lC4yNYpuPj48w4EC1fUWiMC9tFVSGBcT+jzzUlzj/44IPALL3wwgsAJmouTz31VLAIkhWlVX17ezuwppR7f/jwYUJ226KszKxakaxinrJs/Fut6V6MCjBuL1pm6G+vlmR+T5V8rHKPxpyVZaS0f1krMdu83++nlIFGo1GwdPN5lTnkfGFZs36/H9pcrcG0yKlynD5LUXgMm8auAGPLmybL5NVK5WtyYNa/VfI6ODhIqSV1u103SS/rbZ7Q8lq//3q9HlgYWtBpNb9y5UooJ8tC2ex+v5+ycg4Gg5TFcR7P7d1Pxxnf05hCqtaRhSdef/11/Ou//isABKU73uvChQuhLpShUWVG71nKQBOas09wLuDvbG9vBwssn384HOJ73/seAOAf//EfAQBvv/02gHF7/dEf/REA4Pd///cTz//d734XP/jBDwBM5rzV1dWwPts5XpnlovAYKe+aF2PmySJ7aUbse0U9WaaNkcp77mq1GvY+asHnvM31h2vTc889F5h5ziMaq0Jo37YMuGWoZoVlwbW87J93794Nay+fm+uQrluWYffi0DUe3iLvvbI4T73PsjDKUti0LJYRtfeaJ5Q9s23P/cTx8XGCiQJ8RVmNi+LcYtl19ZbQWGDOpVbJeBrVvrwYa43D43rCet7a2gr7O7KenLdOT0/xq1/9CgDwxhtvAJjESB0dHaViEFUl1yp4F2WkCh2k8g4zo9EoNCYbbm9vLzSKuvQB4wmDA47uCtwYPXr0KBVUr25z8xhIGgTNv63u/unpaWoj0Ov1Uht6hT1AacfTjsarvg8k6f9ps8cD2YdNlZDktd1uh3bhoszv/fznPw8HKbYTJ8irV6+GxVhdd+YhMkFofgYLL1cH3QQ+/PDDFK3Lsr3wwgsp9wn+zqVLl1JSz2tra+GQz77NfjHNYqUuR9alTwUm+Fvsn0tLS2HM8LChbcHDL92u1AWJz6m5orIymZ+enqbyRhSFGgA86W4rHtHtdlOHQ17r9XoYK5z0GFB6dHQUfofPrbmirGvNPMQZrPGoXq+n3FguXLgQ2ob1zzbzyq8S9zZVhHe4UsPOIlxg1JWR7bKyshLGCduBh/bLly+Hz6tbLZAUS3ic8PK46OEXGB8suGHlPPKzn/0MAPDtb38b//mf/wlgYphhqo6nn346uAWzX5+cnMw1sFxzBNn5hvPtjRs3wpzNsr3xxhv49re/DQD46U9/CmByCPqDP/iD4NLHNqQ0+n/913+FAxf7+JUrV8LnWGeatmOWg1SWJL4aKYscSLMEJex73rif52Fe29zOEa1WK5VnZ3NzM8zbnM/YllevXg1zCaEuwnbtVpEHz51wljkvK53A2dmZu6+waS7Yd+/cuRM2tOxLFENhOYCk0dTLIbQoeJt3LaM9/I1Go5TRnW1bq9UKPfMs/U83+XbvmneQUolzT1jCGsD1IGmFJdrtdkJcAkDiYDWPPHNWiOv09DSUnYefZ599Nowdzlf8zHvvvYf//d//BTARSuMa2263w9zIdXplZSVhxNJr0XaKrn0RERERERERERERERElMT318f/j7OwsJc15cnISTqa0lpOGu3z5cjg5kkUgI3V4eJhItgr4CfZmgVKi9jSvspH8m1elQK3bDTA5xXvZnu3Jvd1up+hRXgeDwUyn+iyZV5XFJHZ2dkK70JpOi9GPf/zjwOiwTLRG7+zshPbhPTUx3zxcMLX9PXlo3p+/Sav4w4cPg6QvXfx+/etfAxi7KLK86qIEjNtEpbmBcftaSWhNtlqWvfHc6zTjNjAeA+yPLFuz2Uy5WKlYBl+zAiAqMKKuZJ5L37RlIlQ2llDLFX+T99/d3Q39g+OD16WlpcCusc1ZtqeeeiqMQ743GAxSkuO0KKmFugjUWp7FcqnrmrpmUjKb7AHL02g0Em6OQNKCqCwdf2fRgdd51npl3qz7jbpyWku0XjWJJT+fZd2blwuM515l+9b6+np4TrIxtF5+97vfDWsRrZZ0G3n++edDH9R2yrJcTjP3ad9g/yL7xfFw/fr1sHaQef/Od76DH/3oRwAmczWlzv/4j/84MPJk2f7jP/4DAPCTn/wk/A5FD5599tnAjFi3oV6vV7pf5olN5DFSOv48b5girn2eK6Bdo2aBJp6260S9Xg+v0YruJd0l46tCNtatc3d3N1jU6ZZ/fHwc2s56yCxKMlyT1fJ5L126lNr7sA7u3r0bREzs+qleSzp3L1ruHPDrR+d4L90Fv2e9kpTFsX1s3q59eUl3PalzXeeta73uZe0403Lb/anuXdkH1MVvWtc+IC3opa6tvC/3Zs8++2zwLODz0rX8jTfeCP2O+1lie3s7uAByT6WiQbZdo9hERERERERERERERETEgpDLSHnJDq11t9/vp+QWh8NhOO3RF5gMwMrKSjgx8wTJOBQNdFf24XElZdPgdesLqwyWBlAT1q9WhSs8K57n283rtP7BeZYWtWLwdH/16tVgGbNW2tdffz20J2MDyEitrq4m4n1YzjLPdR60fmzAqsaX2RgzldBnv6IV7969eyGomv1RmSlaYjVGjXWlAeZ8vrJSwMpi8JksI9XpdEJdskxra2vBiuIl32UcoiZHBpJjUyXv+bdlQqZlo4CklZhtQvT7/ZTlbG9vL1hZyXzS0lWv10Ndsf7Vuku2wIqPAOn5qawUsMdIcfyqBLtnQbfPoHGTNobyvCBla2WeR7LNsuNQRTI4F5Dt1ZhItiM9DA4ODkL7KXu/KFngLDZI4xbYJ4fDYbBSchzQerm7uxvGGRkapoO4fPlyuJf67nuWafscRaExMewfHNMc9xsbG6G+f/zjH4crv/ulL30JwETq/HOf+1yYW1577TUACMIUjx49CvM/y0uGBJgwUSrQNO3apDFS3nrrSXnzb2VBCW/d5NVL6urFds8Ddqyr4A7nLq4rW1tbqfhJPs/p6WkqvQfZ0Tt37oRxx/dOTk5SsS2ecExR6D2yUrNo+dgvn3766VAm1gXLe3JyErxC+D3dV9l90eNgo7KgCW6tV5TG+up+CpisT7r+LWqe85Lu8sr5WRkplTpXrxO9l7KAnqiSFw/lMVHAuJ7KelNpXVmhB2V72ac0/QZ/l3upW7duARjP5/xb014A43ncY4EtGxZjpCIiIiIiIiIiIiIiIhaM0qp9hEoUWiWQRqMRTo5kpPj/2dlZsK6QkeKJUmMdPIvtPMBT93A4TDEcPIUOBoOUhUxP7ta6okovntynLct5CQJnsWh4qjpA0sqp8Ta0Kqh/KTCOL+J7ZD9ora1Wqyl2zlMwmgVqjfCSwgHJ+DNayNbW1k
L/YtyU+mXTIsM+Ryv6U089FcpH60Wz2UxZMK3VpgxoPTo8PAyWRfV5B8btxPKqUp/HRAHjcaXPC0zq7vT0NCGBCoyt8KrSByTbcBYpYMLKxiqzoewX5wvWBa1Iy8vLod/SeqT3pyWKDBaQjhOYxXLOq7WQaQyQxk4CybhKj+GzipAau2PjJTURrjd3lB1fZT+vVlfLRLEO7t+/H6x3Vnlyf38/wWIAi4t/yJPLViVCfu74+DiMCZaJZVRVNcYVcdxVq9WUcprGDM0jjlcV8dgnyDbTItvpdPDuu+8CmKgNHhwchOf9+te/DmDCTA0GA/zwhz8EMGGkyMhtbW0lUkMA435qGWs+l1rdi0LjDD0miv/bGAUv3UhZaXSPoZkHI+Xd1zJjtVotFZ+3vLwc3rdj5/DwMMwf9KRgzOX9+/fDHKleP5YFnUdssiKvjtRTgDFghKrccR6w9a77nKJtMm+mR9uP9+73+6nYLZZVY2kss6NjQ/vtPBSMCc69p6enbmwUkFToU+XXvPh+G9/H8i4vL4d5SON+2Z+tGrWuW2XheXmwzlqtVlgb6Y3SbrfD/oVj46233gIwjolnvdgE2Op5xHJrKgEvvq1I200tNqEShTY4fnV1NbiB8coCHR8fh4mcE4W65thN2DzkixUaOEjYgNV6ve7KPtqgaktB6r28TNDaQdkxrGuaHryKwqsfu1jpbzBoemNjI3RGUvBcpPv9fjj8cnPBNuz3+ynpVe85Zmk3XYDtfVR8QrNTA+MNBw+KKkABjCcZDhhONuyLJycn4bDBwbq+vp5yLdPnK1s+lTy3mcbZDqPRKJRJ6WiVOweQEJhQcQm9l7oQ6kEqa8M/TZm0nWwdaWCypc4Hg0GoA/ZVPuOHH34Yvmtd/HSzqcHNWTnMZjl02E0Sn+ns7CzMD1oum5dOn0kNGnpPzVeV54r0ONxerCtPr9cLCxKfn22mQibWzaTb7aY2wfMWDSoCdVNSlw0bzMzx88wzz4SxT3dmts3R0VFCTAkY10lWmaYpI5+10Wgk8pPxuYHxPM28fnTxu3z5Mr7yla8AAF5++eVwD2BsGPvud78LYCK6w7nyxo0bwTDD8aUHTfZVQoV/ypZJ/7Zrk7r26dixaUA8o40n1Z0nTpH1vWngjVP+jmcMGY1Gqc0f1wSVvFeRCWA8L3Js6YYvqy3mtWfKy4ek5cyac1UMah6Y17zh9RVN48I6Zv3qXs3mONI0G1ZCe95QcTQ14AHJ/FB2Pla3UXtI0VxRutcFkm58+pp16eNcM83e1TNcEmq05mGdz3F2dhbGBw1iNMTu7e2FMlmRrs3NTXft9g6YZRBd+yIiIiIiIiIiIiIiIkqiNCOlJ3cgKWLAE+TGxkawnFvXnIcPH4YgSlpeaIVSi9c8k/AqNHDQCkPwpN3r9cIJWClRT5YRyE/Qq7/Dk7BaqzxGah5Jzax0MzBhlFi2arUaTvW3b98GMGFxNAs7rbS0Qmmbq4Vjnm2l981iGzSIX60qLCetEWTgVGhBhU2AcTuzPyrbynuxnfh7au0uCpUl5d/WBapWqwUrNOv/qaeeCuOJLjgqMKHiEsDEErW/v59goviex0SxTGVdd7SdVHKcrwHjtlE2k99jm9m2ODo6ClYmPhu/v7S0lEoKqqzOvCygH+copgAAIABJREFUamFUphoY9wVaxjxGSlk3/YxCWVWb+FH7tee2syhGxzIGGnhthVWGw2HKXdOzlqubzyKe1cN5DL2uN8BkPlQmiO+pVVdTPRDzdKvibzabzYTbKjCZl2/fvh3GBj/zwgsv4HOf+xyAiQsgpdF/9KMf4f/+7/8ATNqCXiLPPfdcKC/bUNMvWHfmaYSfPIuzdeNTV3q9WvGbPFnpPGl0zwVvlnnCq4OsBMEsC5Cc56wXz+npaUoYSNNT2PHnybwrG/A4Wd8nJVleFh5zZgUidJ/H31TXTMtIabssKhyFYP/pdDouE8X/PYlzuwbp/KyeUkCSfeL8oO7nnksfkJT8nwVWTr9er4f9F9vk6OgozA8MS+EcCUz2fty7cg/YbrcTwiLAuA1nTWMRGamIiIiIiIiIiIiIiIiSKCw2QdjTnPrD80R78eLF4GPO0y0tMPfu3QsWNZ6ceU9lpBblT2+ZBSAZ4Mr/bUxHVkAsr3l+wtZi5DFvynRY3/Ay8CRLgWTcF+tA/bIpusDvbW1tBTaRn/dO8B7+P/a+rMexLKt6Oeyww5ERGTlnDT0VPTJDt4SE4IXpD/BDPl6QEEhNVQMSD/wN/gaDQAgxdwM9FD3QdFdVZ1ZWjjF6Cn8P1jped999j++1ryOzYa+XG2Ff33vGfc7Zaw9t22dblk3raMNKq52u9Z86OjoqaXA0qSHLzXqqBpGfsQ3V56MuNAS5OqwDRcdK66fx+uuvJx8pBsSg5rnf76c+4xzTABM57abXh+tq03R+WO28srLU/s/n8/QutoWOWbY962JD0tryt+VL5PlIeX4QVk55vhmE50jrOdV7/iQvAzq/bMCNHANnAy8oXlZoY9uO6geoGk9e2a+Wcdckw4QyNG3UTxlPtiHXSFoOPHnyJN1Hu//Pfe5zSePKtfVb3/oWAOA73/lOKjfZbAaAOjo6KgU9OD09LfXjJpp2byx5663nX1jlI5ULiuMxUvxcr/Y36yL3Wxsqez6fl/zOdB9lA9jYFAIKj5FqYyxeVVLcumhzL+H5MXrBJuyarElpbQoE9qeuZ95+uQ1wXJydnZUCSuQCS+g6afedg8Eg1c8GlvB8pJSRUt8oXjfxkbKsscppDQzBOqnlDbCcS/v7+6ls3MOyTmrhpLEONu2n2nmkCGvap5H2NNY7N4N8Bs2mNAINK8R7tCPqTqCmDWAXz6pnepv3qvwK3nde+eps0oDtUMPdbrdkMnh2dlYKdsDDx3A4LAw+YNnn6+SA2GSgWjPF3OKsY4hXCoHd3d0kNFg3jVzn5S+wigN1Tm0qNNjWal6nDubAYg7ZnFFvvvlmOlRxXmmACQ0uAaAQYEJN+oCF4NccElqndXKYab/ag5k33jUABcG5aBcwYDnmWMfd3d3S4ue9c5sHEW0nzxwvlyPKc6q3hxQvwpl9zlWgrszTAzNx1YElVr3Lk8N2rVHzVCtrdGxvKxAS54Gar2qkT4KbA+bAu337dlJGMHgOc7Odn5+n+2kezEMXUDYN0gAadjwDzcefZ9pn29Q7SE0mk+xBqsrMzwsA0XagkzrPuLy8LJlO60HcBkFRc3kvEq5nxleldL5KU7u6kfbqlOmqD3DeQUr7wR6k9vf302dcv3nV/cC23FL00KSHKqB4+PYUpXasUNZoZD7v6uWMUlM+oEgCtFFnT0HgBfmwJowsx8HBQcE8Ub9ThW8ux2HTeoRpXyAQCAQCgUAgEAg0RG0bsiqN6nw+TydUhmu+detWov/UiRVYmCZYkz51rN6Wox6RM8XJhR/OYZX5mWduV3X/prDPUU2EDc95dnZWCD0PFPPb2Pu9cO8e67At2Hb0GCkNw221MN1utxBsBFgyQepkak1P9Bm5cJ2roHS8MmjAUuNz+/btgkkfs
EdLaEY66ieOrqvs1bluqlgLHSP6JEyqXxifawnjnpTsax2Du7t7aU6qkeO9UTwUppUhc4f2066nnDPcvv2bQDA17/+dXzpS18CMJJnf/M3fwMA+Ou//utkzf6lX/qlzPWll15K9eVeRCnRy8h3qsJaloHs3h8Yjh3WibLr5s2bePbZZwGM5Brfv7W1hU8++QTAiECDFqdbt27lwo7W19fTZ2wfpcW31mwPYZEKBAKBQCAQCAQCgZqoFCOlWn4vANTGwmjAqrUYqAaWPt7UvDx58iRzwgeyids86s0mNZmqbbGJM9WaQQ2FaljtyZralqLYoaLT/LhJ5/hca41RH3ibVXwwGOQCkL1kemqJArJBymqxKYpVUM1PVWiAtLWcaFI/pXcHsjFVhPpzFxFo9Hq9TP2ALKmGjalSK21VKKmAJclQzRvHEOfCxsZGShpKn19eHz9+nLTRnE/UxiwvLycNDjWga2trOd9vrVPd2C+1mHgJP3n1xoaXpBcY9qW1xHhBqWrdsrGJaqHS2LTzoJbcIsvm4uJiLm5wfX09tTU/Y/vOzc2lemsSVWCoAWNZ1ZJi663lqjuXCNWAWmvbYDDIWcHW1tbSuGEZSWrw+PHj1N+MieAcOTg4SGOXfaSxah6t9iR0wFZ2qgaa44Hl6ff7qU6Mf3rppZcADDXqrB+1tNTE3rx5MxMrAWSp9cftEw9qOdTYQJbfpgjRNZJg3ywtLeWCq3ldX19PdVFr/ySxrefVS6EJOK1lfmNjI2e5VHlhrdiexalqqpOLggbqc93SOHH2gbWyXRbsOg7kY6mVTpoWKf7f6/VyseDqneNZpJrYD1l4JBN6BUYyfnFxMbcPUBInK79VrrBvdZ238eKT0p4DvnzgtdvtJnn86quvAhjKZ6Z4+N73vgcA+Ku/+isAQ0vNb/7mbwIAfu/3fg/AyCK1s7ODd999F8BI7h8fHzcSA0/wGV7b0vuBcwQY9cmNGzeSxY0WUNZxe3s7yW9apLgnevHFF3PecleuXEmeO/yd0vRX8QIJi1QgEAgEAoFAIBAI1ESlGCnPR1rpvvXECAxP58psBIxOeLu7u+mkSQ26Mp/wVKraKo8aFRiPVls1vNSE2BgIteyUvYtt4LGCedYt1ZIXUbseHx/X1lqoxcVaAJXO3GpNNRmoR/npWaKA4Unes6R4Ce9Yp7qxHfR/VQYwarM8hjuNP/NYEoGRNQBAzuKoFOPUPPd6vVzci1qoLOPVeVAqZrabxqQBQ+ufZbqcm5tL7ce5Qk35J598gnv37gEYWan43d7eXobtCxjOOdVua500JqxunZR5y6Pz9uZOkUVKY6Tsd+pHr+1px5xaqCa1SHljxcaKnhdDaS1mXiJHjZe0mtxJ4kLLYhzUSsT38129Xi9ZM2z8xvb2dqLUpQaU7by9vZ3qpDGaXmwUUVeOe7TTnmXKMhHu7u5mLDLAiEHt+eefx4cffpjuA0ZzaXNzM91v4x288o9jTVDNv1ongWyMnBcfpHIeyPr/P/PMMwCQrqy/UlNrjGlRQthJ0iPoO2zdTk5O0njXGGnGK/BK2acsdpSfGidK7bJa3q0XiVoIpm2V0vpyXFFDrvEXjMvjHFNrVZV2b6oeRe9S9mZNWksZYS3xGndprQw7OztJ3niJy5tM+2CtNmoRV/nEstv4b44VjU9kPSzDLuAzxpaxtdaFzkNe1WuC8ph1eu+99/CDH/wAAPC3f/u3AEZeYP/v//0//PEf/zEA4Dd+4zcy37399tt47733AIzWJvVGsvFr49RH985WBvB8sLm5mWF/BYZyjesPLVOUa48ePUrPoCcPLVMPHjzIJI5nnfhc9j37VVlMS+tR9qUemmzQKwfE6elpamRtWEsowYJtb2/nBiEFxvHxcS5gXV2oLF054B9iyqBugp7LAOvIjuMCvL297ZIRsIx28dENrHXFODw8LM0eXxfeRtG60XibqG63mztAaTZvLkTWFNrpdHKC7uTkJEdnrffUrRcXfSUlsAei/f391D/qEqqHKt4HZF3GrPvE0dFRLhdYv9/PEYXo5Kt7kNKDHcukRCu8cqLrBojCkUGj2r+cT9z0qfsfF2earPf3912aVv5f10WJddJ5aum/NehWKcSt25JHf25d/LxNxcnJSW7e6bPqHKQIfY912+33+5l68DNu+Cypgua+8pQBlir86Ogoo0jSMuhn49SJsG4ZShXOz5Q0iOOfm7zj4+PkysMFjP2pC58ePi3hxrSIGgg9rCuZC+vJDR8X4jt37uDHP/4xgNFGVzfzfAblvh62m8ipQig5TxENPt8PDOU4+4zynLLjxo0bSXbwIMgD1OLiYhpnusGz/aPXJjZ/dr1VV1C296NHj5I8o+xSAhPOMa5R/F835aoUKUpjcZEYDAa50AWOt62trfQZx5QquMZJ4TAOPKpzQuU454CSgnhU+krRDyBDmkR5qod4YlLXPi9FjnWXVgWE5lajzONnWh9dp1kPPtOSqGiOQI+AaxKlBGEVYouLi6m+d+/eBTAcY//+7/+eygQAv//7vw8A+KM/+iP89m//NoDRQf4f//EfAQD/9E//lOpHGdLr9VK7eHK8bp20fTQNEjAaKxryQ7nW6/WSsp2ELcwL+Omnn+bChijPP/vss/Q7VZByDFOesH+rrlHh2hcIBAKBQCAQCAQCNVGq5vC001aLDOStMKrJtybd7e3tnDlUKVt5mqc2ZnFxMf2tgWl8b90TsGolrHXNC1BXa5ilgFdAqNa8AAAgAElEQVQNvA0m9OhY1bJiNe7qumTJEs6Dmo2VZt7CWvYWFxdz2bv1ammcVXtQZnkjOFaUXKMqaGoFijOTq5uL0hxbrb9elcQE8JMBEpr4zqO3Hzd5rWrJlFAC8N0tV1dXM5TmQDawl/fR1M177ty5k7Q6GkRPbY2ar4Fhe9a1ErDdz7NIWc3Z/Px8GmueRbjIInVycpIro5ckUPurjlup9ql1q1MNOtvVs7p6891a3VSbZ1MVaHtZuTKOK5Ln/mbno8okai3X19dTH1pa8FarlYJ82Y8cC5ubm6kN+J26jNq+msS9SqmAvdQPmkgSyKbYsEkeX3755WSd4nzkHHn69Gn6m9pL9UTwLB6TuGBaba8+SwPjgaxFlmWj1vXGjRuZ4H8AGRp3T1tvx30TyaAV1pW+0+lk3MFYRlrmrRdBq9XKkQaVpSnxEs9fhEXKtpW6mnI/RLfsjz76KNWPJChMjOoRel0ErAfL7Oxsand1aVYPISDrhaQpPICRu9b+/n5OLqsLeJNuijbcQdcPO5eUYIrf8f7d3d2cRUpJ1awLeKfTcd2AWa5J4K0ZwHCOsK3Z9vfv308yjtan3/3d3wUAfPOb30yWK1Ki/93f/R2A4d6EqSGUatxSrk9ijedeutvtpr0Z5wjbeGNjI60/LMf6+nqSD9bF76OPPsq5y6u3Du/ju2dmZnLpFyhPjo6OKu3HwyIVCAQCgUAgEAgEAjVRKUbKS2inml9L5ekRSmhcFE/xlhBhfn4+nQTVF9eedFVrMS7ZhJcktiz42dOqexTwngaZ0PdZL
YlnragKakA8mlKP+lu1L9QYU5NJS1Cv18slQtS6WBKDmZmZnNVGtYB1+8kj/ijzM9ZAUhurpdYqG6+mxBQ2wN+zfrAM7Xa7tt+6LT+Qt2KoX7kX06K+6cBQQ8Q+s7TmGjvBa6/XS+Xgs7wYsqrQGCmPVIX/e8QN7DuOQbXasC94VcuUjW+bmZkp1I5pfFIVeP7enkXUWsz29/cLx5Ymjtb5CAz7wFLxa8oEa+XTZ1TFebTnQDbRIsuxsrKSfkuNHrWES0tLSTvIslLG88r6AcN+t2O9qaSbFjrmrTVxZ2cnzS8bHP/8888nKnRqaTW4nHVX0omi2JtxtLMeqYO1tmnyWsoATRFBLa3GRVlLFJ95fHyc5q9erQXIs4rVha4BVove7XZTXfR+u6aqfPHWApbVWtJ0TW2iLmUoe65aRygbaPm8f/9+am/2L+eXxqZdhkVKPXasV0u73c6ldqBl4cmTJ7kYXU2ySqhMatpyWDQe+D5NbwNkY4BYFiWssfH9nDdANjYKON8iNel8svUEhn2gsfjAMIb1q1/9KgDgV37lVwCMrJ7//d//jb//+78HAHz/+98HMPKSeeWVV1JMNi1aJycnqX+9dbgu2Nbz8/Op/ex54smTJxnSH2Ao+7hO0TOCsVIffPBBus9atx48eJDGpCb8Zj9xLeD14OAg08dFCItUIBAIBAKBQCAQCNREqUrdi0WyGj6PgWxrayunndTEZRoTBWR9FT0/U6uVVUryutpZT0vF8qjmosyvllCrk9XGqxZNrRi837NOjAtlOCyiN1bKZmXm82KjgOGJXFkJFZoAVTXtRX0xjvbF80u1dVPWLO/dVhOlMVWWGr3f77ufWXZFL/FjVWgZrVaW/aS+1h6LpKVlbrVabsJYXqlpo5ZMx6M3tutaBzQBrrVEeTFSOmdsLJgX+2YtUmrdUfC51u++roXXi1PxZJ61PikVv43D0/Jan3xthzIqb/18XJmhsUiaPB0YtqtaLYGhDGD7P3z4EMBI67y2tpasHvwd48b29vZyNMmzs7Olc2camnYvxnF/fz+XdoOW3GeeeSbFpVBjS8vUwcFBup9toEyEdk6NA88iZWXe3NxcalPes7i4mNpb6YGBoYy3Hgs6Lu2YPTg4KLVI1ZV7Xr96SVft2l627muMrmUTPo/x8rJhmd2o+V5dXU3tbRM/TzLnJykjofsXu14B2dQawGh8PX78OK1T3Afq2POSJDeRsBbI9r2V41ovrosak68JeLV+W1tbqY6cL3ymjmEvdt5LjzDuuPQo1HV/YuMll5eXU1wQy/P2228DAH74wx/iP//zPwGM5tDrr78OAHjzzTdTvLXGoluPskk8xDTW0XrIqDVJE8EDQzmnMeTAiLXv9u3bufvZh0+fPk1rGWWkxrPadWt+fn5y+nMVoHYjqRvSMvcAHWgsqEd6AAw38dbVzHMxU8E67sTTgVzmHqabqKJDlQoAz+2vjA66iWBytqO6Q9hJvbCw4OaKsu2teZLY3p77hF2klCTDM6XXpdVWN7uinFu6eVeXKdsXiqLNsea60s27JTvQa906ab9aNxeW9ezsLAkUbceinEt66Lcm/e3t7dzY63Q67sbElq8q+C5ValhlgneQ0kO/JweKXPs0+FM35nyWPbxoQP24KNv4a51t0LUSLNg8P3ropdDWQ6/NRdVU3hsvbQGQzV+nrmDcaNM1h2157dq1HBkAD1KHh4c5MgB1s7N0x5NuEq188p7H7w4PD9OBiJsipT7mhoNX3nN6eppLp9Dr9TKy15alic2vHWedTifNEyUWYnvblBWdTifj9gNkXZfZv1R06kGqyJ1/GrByWRUmlhRKy+LRw5eRZEyzDudB12fOC7rvaT/ZPFgqK+3zpl1eIKv0s2vq0dFRGjuEygzOH5ujThWv+vymXPrKwlI8N1nOqfn5+VwOVCVO03kCZHOJqpsaMOzTolxL44zDMrmmz7Nu4b1eL8n5Dz74AMCI5OSDDz5I5b1z5w4A4LXXXgMwlPF6iASGcsK6ZE8ix3U9tEpfzblIsgkegq5evZrLxUlCrlu3bmXSdGjZ9vb20rN41X7y1ukqZFXh2hcIBAKBQCAQCAQCNVE5IS9RFrSpGmYbsMcT/2AwyLibAVnKQRvsNT8/nzPxF2W/rgN1D7AaUnUZ0+9snbXutl1U+2rvV62Od7qv616l5mNrieJpvdvt5jQmqvm2FjI1d3pundZioPTJ1npzcnJSm9Kd2ojZ2dn0HKs1UCtIGbGB9qsdOxo0a60FWm6bzE9JD5qAp4nTcVBGfGDHXlm51PRvXWzHsXawz9XqZMeSWo51ztr5rNnlrWufN86s65G+05JPjIMiq6G+R131ioLz1WJtLYSaasEb103SAXuuF2rd0/ZnOSxpENtgdXU13UcLjVpvLAW1ejV4Fv1pwVqrNFia5VWCIwZV0yJFzebW1lYuqfLR0VFOQ6/9NUnCTS/QHxjKbE3ES1irriYT5XyhpUCD6Nl3SjhT5sbcRF95RBpWdlV9j7cWn0f4cJmw+yH2q6YaUDds4qLK7XlNeGNaXWXt2shxtrm5meaK1errPqcqtXudsee5ydrnqBVJrSH8nvNGQ1b4tyUw6nQ6mb0Wn19Eez4p2URRnXTtUEp0ehTYdETLy8vJnZmucbSEqhs066379yYo+fks9ZhiX+heS6nQgWFoA9uZ5aBX1fXr15Ol18o3tcLTg0LfTZmnYQrW08RDWKQCgUAgEAgEAoFAoCbq8TfD95u1msx2u51OlVarpfdbP3olRFBLQdnJt+6pXv1fiyw/arnQU73VqJYF6+vv62qXxw3o9WjYPYp2LY8XH8bvbN9pfI496WsQok12O06iV7Xo2XbzLExlBBQeOUmZVZP1VRIRj8SjboxUGc4LwLflVIuI1cp6hAlV31N37CnVve0Lz8LitbftL03QSe1YmUVKrWy2n+rSuXvaVztftc3LSGh0vHqB1WUo8qVvioLa8yywVMCnp6c5ul/K5/X19XQ/tZW8zszM5IhqvDjJJq1t+hztN48mWGMIAWTo0GmdokaTV421UFIaqxVtGnZOAX57sRxK3AQM5wjjOSxdsdKfa4C3va/svZOgbrxIlfsukpSh7P1FZbVkE7rfKaLd1oTeF2lRs+stkI+R11hApQnnVe+zz7TPb5JUQ+Wa9RDQdUb3nkA2TQPnDeXa9vZ2xrKhz1KPH927eiQTWr5xUDYG9H1aD+stpOQaXGNZbo2bZF+qN5X19JlkTHqxn7zqOs++oBVpY2MjQ4rEsgFDOU6SDCvjZ2dnk8yz65ZCveuq7CFKD1Jl7kY6MSzT3uLiYq5xdQPq8e0D2Vw0ZRtcdZ+rO/F0o1W0+VLmNA1ctW57ZSxLKiS8w1VZvqm6dfJYb8oOnV4+HDspVAjwfj1IaW4mYLgQq3sI7+Pv6rrBlS2yHkuMN0a9Q1PRQapu240jPOqylXnuKmX3V5mvReUYFxrM6QUP8/8yZYIN2PeUM3qgqpPvq9/vj3XgLVvUvXFXdkj3nuWxSum1ygF4kjpZBQqQdwc+PT1Niwzn
MvvjypUrqX6W+U3ZsNRFuijoe5rwNrYcNxpEDmSDjhm4zBwlu7u7OQZPLz9Y0+W2z1XCAk/hY+X48fFxksv2d8qOqZuWqhu1KpjWxv+yD0znoaqbmrpsqjsmMJIHyqh5EQepIgXH2dlZTpmgSlKPEdfKMVUIWAV5lbWtKjz5attc3fGU3MfmyKR82NnZyZFmKOGBJUMrc823f9epkwdPGWrJfYB8XsL5+fn0W5v/UN0XtU5Fsm6SsalkTTxIaR5Q6zb69OnT5Mpn23ZhYSEx8vEA5Y0H1nN3dzdnePBkaxnCtS8QCAQCgUAgEAgEaqKSa5+nBVNNLE+reiK1WlklA/DowPVefZaXB0C1dXVNpJ450n7mne61HHXNs1WsH5OYt8s0OqqdUE2qvpPfF33nWaQ01xIwPN17xAD83STEDGXuTV6d7Tgps9R4rgbe/UUWhUlR112lKctEU1BX0KIxfZ5FyiNhUU0t4FOjq8WzyNrRarUq0ZcSXttUGUee5bmsrp4125MrTWqgPbcP1dZaynXNX8b2pTtFr9dLn1FLyHbWFBf6vmm7inmwfaCuPtbFQ7XKHG+0TD19+jRpplUDb4lw9L1NEJ0Qnju2fmffr2W08lDXBEsM5PVTUZkuGpf9/mlA9zbWhUhdLC+DbIJQWeG5fXr7AyBr7fb2OU2lP/DgWYDKrIAqr+zehvJhd3c310dqxbcWKc8DoOmcZlU9WrxQD97DelqLO+CTkNnnN0Ga0Wq1cq7l7AcdR2olpJufR3KlnhPAaGz2+/007ljPfr+fS8nknQ/KEBapQCAQCAQCgUAgEKiJSjFS+rcN2PPuVw1ZlSSqZf6d3mlXT/fjnobPq5tnBWlKq+DRTFfxEy6CRy5grXgaD6X15am8TPNgiSjUt75K8tqTk5OJiBnK/MOrfOb1dZHVyv7txShcNM4b45epqdV2LIoR8uL/vDmvFhF+xvGoqRLUBx/IWga8WCn6fk8KT7vpyaIqFmLP0q33NKHts2VVWDnRarVyMuDw8DC1NZ+h9NpWg6nUsXwWoTGmRbKvKXjywhtvHDMcH7SsASN5yNi8tbW1XIC5WvltCg37zknrov9748zep+unlb069qyFwbN+lFnmP4/4eSij3Ut4lkM7ry4bOl60TF5SZEI9l/T/qp434/alJz89UiNrhVfLoJes2qYM0VQ+lrZe95FejHUTe1eLojhsuyZ58bhWhnlEafqOJtYmHfPWC0VJjyiref/+/n4mtlXv1wTljKPSdDp2D6geVoSuIWGRCgQCgUAgEAgEAoEpoHKMlP3b09qr9sGe2JUK2Won9ITraTfKNB51T8VVmGHOzs5qn/rrlKdJdhr7bmvR0/6yWpHj4+PSODXCtvvJyUnO6qQaNaup9qiPx0GZhr8MVZhyirSuth3LynIemo51qvOsaWlptX2Uwc9ePR95wo5LpeonlB2J/s8ai2c1VjreJmVUK7N6jstA1wSL0zgoijf02kgTb/N7av3a7XZqc97D/pubmyuM2VFcpOWgbLxpol07PmmBW11dzcXmeevVtOpU1SrkeXQUMY2qFVXvsevyJHX6ebAOXSZ0LbZxxGUeMFUZ3KqiytqqaSaIojFU9NyqnjeTjhu1slgWWU22apPCDwaDnIWa7J79fj8Xz6sMgGWJasvivKti3D3peeUo8qQo+l2TY09ZT/lby+p9cnKSK9PR0VHqF97nxTWpJ4t9p1ohuZZZL5qq/VTZta/oO90keXTHFue5GljKxqq0402iiUl+UcGhQFbQlpXNtq1Hf1624dB+8DaudlOhv5tWezTVF56gKBNAkwr6ix5D0zps8bkq7LwDVRUCBo6bhYWFHOWoBgpbQoDl5eWcW6nnXtIUmjj8XKR8IMrGsyezlcTD5lNTEg9L5uFR5F5kwDyhyiMP1sXv4OAgs8kCRvXu9Xq53HkzMzM5WdfE4aMuyurppQipun7GIWj6mPZepknoodz7rMoaeRljyirxdKNu1xmlc+dGnfNd5Zx1Q+t2u8nFzCNNm1bfjjt/qxx2m3xfETwFO88RbE9tdx1rVGyxfzRvqD3w8kCl7th6WLKf6bioEsoTrn2BQCAQCAQCgUAgUBMzn3ctSCAQCAQCgUAgEAh83hAWqUAgEAgEAoFAIBCoiThIBQKBQCAQCAQCgUBNxEEqEAgEAoFAIBAIBGoiDlKBQCAQCAQCgUAgUBNxkAoEAoFAIBAIBAKBmoiDVCAQCAQCgUAgEAjURBykAoFAIBAIBAKBQKAm4iAVCAQCgUAgEAgEAjXRLvvy29/+dsrWW5S4Vz/n3969MzMz6ap/67XouVXwrW99K/8QB6xT1edr2YrKe3Z2lp53enoKABgMBpkrAMzOzqZrq9XKPMtrx7p1smWqA9t3g8EgVxd+12q1UvlZp1ar5fajxVtvvTWVfqqLKmWtiqr99NZbb9WqTJNlrItJxt40UHcceG1XpU5l9SkrgycndE7Z33r3131n1bn0F3/xF2fAUDbZuWz/tygq2yRjs+y3Vcfdn//5n58BwMHBAfb29jLPvXbtGgDghRdewPz8PADg3r17AIC3334bDx8+BADcuHEDAPDmm28CAG7duoV+vw8A+PTTTwEAm5ubAIbttLi4CADodDoAhjLPtp/XjnVlnsI+zxtL+r+3ttq15rznn3cPUL1O3/3ud9OP2VYnJycAgKOjIwDA4eEh9vf3ASD15c7ODnZ2dtLfwLCvAaDf76dnEO32cCuzsLCAXq8HAFhZWQEArK6upr955T0LCwupP//kT/6kMZk3zv5i0ueNK/OA+muTvtOOL/aFtyfgGDg9PcXx8TEApOvJyYm71+CVe43vfve759bJq08T6+m4+5EmZN5F7x/GqSvfOc7+oc4apHLQk2+6767yrCr3ldUpLFKBQCAQCAQCgUAgUBOlFqmqKNJyKlSzUFfLOQ2rhL6r7Pll2j49CfM+fkeN2WAwcLWWasmx5ZlEkzBuW9m+Ozk5SRpDT0s0NzeXe0aZ5nNcaNtWRRXNRpmF4LIxblmqalZ+HuFZbstg5+NFoMjSPhgMCi0WF1G+hYUFAEMtMK3MlE9qdfbkVJV2rFuHJtqAv5mdnU1/U17RunF8fIy1tTUAwNWrVwEAy8vL+OSTTwAAGxsbAEZWp2effRZLS0sARhYLWkNOTk6S5lxlX91xWRVFliLtk/M8P4Bs+1A+E4PBIDNGzyvDJNByW28NbVv24dHRUeZvYGi54v8ct7a+3nrdarWSlYRXzzukqXpWQZ0xPy2vjEmgbattCWQtUoT1bjk+Ps70JzAcB9bK4D2rTvkmwaTt/ouy9tbBRY1VTw7qGLFybdp9MfZByhPsOllsg+pBygqz81z8prVYEWXudfq//UwFB+tHNwEKh36/nxEUQHbTUtQWFwU7GNmHupBxkeM9LDOQP1ROA1X6/7y+0+fo3/rsz4Pga7oMn9cDo7fhqYK6B+tp9atXBqscUSFuN5E6b8o2g02AhwJ111V3Gv7Pzzw57s0pW95xlUHj1NeTvSw3D1L7+/vpvvX1dQBDtz9+9vT
pUwDAo0ePAAAvv/xycv1aXl4GADx58gTA0K3Mto8eUprsO0+W6SHXO/TYsacHB6vg0jFoDyTeOtcUvAMUrzoOgeGhiRttu+E+OjpKz2C99XCrew0g2wZ2068HgUlwmQeoy1AWaTsWHVKBrCsfkFV20I1WFbb2MKb7oib6qQhNjPnPy9p6GS59Tb27Duxaqmtqmcv6NMoYrn2BQCAQCAQCgUAgUBOVLVJFGrezs7OkbVC3Eas1o0Zobm4upzHyTohVXe+ahFdHq1GxfwND7QktUdSkMMh5bm4uaUgZLHt6elpYp6qkDU3DWqQ80zvv6XQ6Oc3RYDCYimufoorWV7XoViuh5bJjsNVqXYo7GHFR7pyXZXmrYk2sWq5x3fyagmcVUG2pWmyBrAa9yCVJMY47axlokdLnWgvA0dGRq/n3XACJOm4TTY851YzTGsG6Ud7u7Oykcq+urgLIuu89ePAAAPDZZ58BALa2thIBBe/nvVtbW7k2m5ubK3TnHKf/yixRrIfnbjk7O5vGH9chXrWMnozX59rnW0wqo6q49ukYpMXCWqZ4r5ZJ28Ra5XSMeNaTSbwp6vRzk8QSdZ7XFNQqxPabm5tLbWvXVCWU4N5nd3c3Xdm/auG1+yh9X9MWqarEKkRT4RcXjbJyl3nyfB6h893uw+nCPjs7m3Nht7+3n01a97BIBQKBQCAQCAQCgUBNTEw2oZS6SmfJkyBPetQm6CnSaoJU41BGXDHtk3NRrBY1Yny/alRYdlqiSJW7sLCAbrcLYFS//f39nKaXUH/uJsp+XlsVaQk9elJCT/yXocXw/PlV22q1uLaNgWqayYvQOtl3eNqjKvFfvwgos0yVBZNfhsXajinV1lKr6gXycy5pHb34SNsWk9SRGuNut5t7l8apUL5Re3xwcJCzCpRRFWuZq/RlE1BvAM5fln9zczPRadPSdOvWLVy/fh0A8N577wEYWaTu37+PV155BcAoRooxU48fP07P0lgpyo8m66QWT89iRPCdc3Nzad2hVpZrTrvdzlkh2Zca3O9ZHJvsO69O3thTYglLNqFjz8bqEBrzpLK9KPZmEovUOJajIstl3di0y4qLYvup5dMSQii9PecMCV22trYADC1SvI/Pn5+fz4xbfc/c3FzO0l8VVdq1bH7pte7+quy+afehF7/uEc6UxcHaZ10WPA8OygPKFdZtfn4+jS2uBdZaeh7qesmERSoQCAQCgUAgEAgEamJi1j4g63MN+Ex11CZ0u92cNokaUz3x64nQsyjYMoxT7iJrgHeC17gmq5nc2dlJmluCdLu9Xi+nZWm32+kZ1nJXlEhsXBRZ1+z3XsxQ0Sle/dAv029Y36facavBVM2FHXvsm06nk8ahajkvqk6e1kvZ36zW6BfJEmWhcVzeOPPaoKn2KGOg87THKpt4H8eUxu4wNsDGGxbF5jVpbaNWbm5uLmex0DJSFlGW7e/vJznFK+OPPBlfZqUui80bZ46pp4NqrQFge3sbwFD7TU0433Hjxg3cvn0bwMh68/jxYwDDpL387ZUrVwAg0acvLi6mNtB6Wy15EzFSnqVdr1Z2LS4upjg4xnTxu5mZmQyDrF41NknXoaI1YRzoulZkkVLGSLVMFVmk1APEzh1dt9SqYWV73UTy48BjEyuCMmra33u0zuehSRY63aexHdUiZVkz2Yfb29spxQCvtEj1+/30XHrv6FymhVXX52kwHtr7dG9jx4pa2q0Ft2681bTg7R/m5uZynghaD8vKrHWqY5WedB5VkZ36DsoF+93S0lKSLUxf4TGtNolKB6nzGtO69vX7/UwuD2DUqVy8AN98W2Z6LBu048AKYR2ARXSyQDaYmf+TQpefsZOfffbZJCjoJqLCns+yQXHTRBnpgreZY93ZXu12O7cwTZP+3JbnvE01xwk3DNxIUsADo00XBfbS0lKGIESv9p1NoOgQr24oKlissPM28F77nPfei0KZkPT6sMjtV6lNPXekJhevMrdLS+er76WM47wHRlTbHIMq2LlZ0Do3eWCmbDo+Ps7RRrOsCwsLuXl+eHiYk3U8aOzs7KTDoZ1fZW5/Rag7LlV227nMdtzZ2UmHJJbx2WefxUsvvQRgSIUOAHfv3gUAfPjhh3j48CEA4MUXXwQwOkitrKwkKnRV0OiBxV7r9p22VZFCTccLD02rq6tpbeFnbJPj4+OcApD9pBTUSoJi3Z6ryJU6dSo7SOmBynPpA7IKMbvZ17VJ62E/U3lSd+2q0q/ehrbdbqfP7KHj8PAw56Kk7V+njJPKDK9NgeyBVMMVWBd1qQWGZC4kdLHzcGZmJo1VPVDRpZZXndNNK5m1rtrmlrBFFZpWfhOfJ8WmTcXT7XZzddHxZ9M6qFLFU7Bf9B6iqBw2vxzLv7Kykv7WtQ+YPHSmCOHaFwgEAoFAIBAIBAI1Udu1zzuNWovU/v5+0mBalzdqIQDkzMTdbtc9LVqLlGeRGAf2t6qtVVdEYHjKZdltUPOTJ0/SyZcmbGpuDw4O8NxzzwHIElBYlz7V5k4bnuXNSyZqg8+VdOIiNRVFFhd9J8uhpCDUSlATu7u7m6EuBkbj8eTkJN2v/WyDmZuoY1nwsY49JQSwwZPaF0WUsZfpdlkEtUxZ65oGn2vgKJC1ElrNpD6rSZc4W2aFF7jPz1hmWjNarRbu378PICsjgWFfUS7oWLCybpL63Lt3D8DQrUat6ADwzDPPABi6IpPyW11trLaZv1e3Obrr6LOtpUNdo5voGx0DXsoJlotymRbBF198MVmkXnjhBQDAD3/4QwDAp59+io8++ggA8OabbwIYacbX19cTKYVHD6/u6byOW0/vd5wP3W43ufGxv9bX19PftDCyfXZ2dlJ52T9si+3t7VzAv1rhm0xUq65rdr6rG59aaIosUgorP9X6pNeipLHTWrfUiqTeG+wX1pNz5+DgIOe1o2X13IstmnLn8yxRrIcNUzg9PU2yhBZbzpOPP/44WXi5HyI05IHW1LW1teRSqzKI72lKpnseOWrVphxhHRdRHDQAACAASURBVD1PK/VuYTnrpH9pasx5rq2sC8fR0tJSak+WkeVXOWrnoLoR6x5jEgv1pPAsUpxDLDfHE4Akz9mHSo7SaLkaf2IgEAgEAoFAIBAI/IKj9GhWptH2rEIaN6CJ1/QetVBZX+8ianQNzAaai5UqI3jQhHPAMPiY5aUWl+UeDAb4yU9+AmCo1QRGGpi9vb2kzb1161b6nbV4eYkupwXV3mnAMpDVhvEUrxYdYNiHRQQg04Tnu221C51OJ7UftUfs5/39/VQHgvXWwEXPytYENXpVTZXVPh4eHqbxZGNt5ubm0jikplr91z8vligLtbooaQuvNpEq55wGzmrMhW2zJi1TZVZ4yrOZmZmkMaf2bn19PZWZfWOtpO12O9WR0LiNJmTBJ598AmAYp0ANMclwaEG/efMmnn32WQBI9OBra2vJIkNNMeOK9vb2kkWKY5Ia6c3NzZzlS33x68ZPedB0DTZugxrlp0+fpvoxVuPk5CTJ4ddeey1Tt48//hg/+9nPMu3CdlpfX0
9twbppndSSMy68drCETEtLS0njyvF19erVTNJlABmSENaFVlHGq7BvgCxxha2LR4RTt05qkVJLFDCc/x6xhBcbBWTlsrWenGeRsp4XTVuktK2sFezs7Cz1C8cj5+PJyUnqV0sIo2uaxoU2aeH1YnQ9YgmbxmZvby+NJ8oZxhx+8sknSUaw/Byni4uLafxS3jzzzDOpDdhm/J1af8etm/e5HUdKzkJLB+Xa06dPM+suMNoDLiws5NJelL17UngkGawHxw3bWuW4xo8CQxnA+qoXFZ9JWUro2nQZlikdmxwb7B/WSa2d7BPWsdvt5uJam5g/lckmbGN5TDK6MbKuIJrhmr/lJpadvLS05JoqPZe+ceE1ng1a10nLCb26upoWVQYi02VvfX09ddiPfvQjAKPO/fDDDzPPBYbMURSWyj6lZZkGrNDQDR6FGt2RlIufwlCZdyxJhrpXTavcHhuTdec5OzvLHU55eNrY2MgxWOkByrrIKfmJdd0pYyGbBIPBILfRePr0aVqkeFDn/Or1emkDbBduNcNPOxi2as4JT/hyzHOcffTRR0moc1wyv8/MzEwas1UwiYuVhW6+rIvOyclJ6hPOL27QFxYW0mHJbnRnZ2dz+em8xWoScEMwGAxS2bip1nHFA8bzzz8PALh9+3bKv0S5wLbv9XqpfpSLPFBtbGxkDlXAcB2wbjF6sKrbRzbvHTDaeFK2tlqttP5w3mxtbSXXvi984QupngDw05/+FB988AGAkTskx93a2lpqAw1gtodD3TSP23e6CbekIL1eL40lXnu9XpJPNuD/008/xYcffghgtMHVzS3lp27iPRdqvdaB9pPnwgsM29HmkdK2teuzzg/rejQ3N5cjC1CFmz1INSXDPfdzqwzZ29tLbm8cZzyE9Hq9zKEdGM2rs7OzTG43ftakTNf2tAdRzRPFPuNc3tjYwMcffwwAuXH2+PHjdD/lBseZsmdyH7W2tpb6zCqqTk9Pa+2N6vYrx9/e3l6SY5SNvB4cHKR6cM1VMoymx1QdqHKcZaJ8uHbtWpLVHnEb11rKNa5NnU4n7c3V9bcJl99x4bkg2/1pp9NJc0hDN4As+U6T/RSufYFAIBAIBAKBQCBQE2NHXal1w2pjlKiAmgvNP0INB0+7PDkrbaFH/VnmXjhO+e3vNY+KuuYR1KZQc0vL1O3bt5PrEU/+//M//wNgaJniSZkaU2Ck4dXPmqhXGayGUQOXqf26efMmgOFJnhpBajjUsmY1sWUuSE0HiSq9J62arMfc3FzSRij9NDDsV2r0bE6Vg4OD1Nf83fz8fI7mvYlgcg98lrovaBAvtX50PeJ3165dS/2jrmRaZn3+ea5i41I2nwdv7ClBiF4fPHiQtJuUESz3/Px8zko4bXjEKgQ16EqwY0lLVlZW0nxXWmpgKF88Cuoi94lxxhx/e3BwkMrL9tTAcLY5ZcBLL72UrDe0UvG7tbW1ZCXh+FN5TjlI2ffkyZOMdQrIzsG6LozqTq5pGYCRLJifn0+WMVrg7t+/jzt37gBAutIy9a//+q9J+/zTn/4UAPDLv/zLAIZzy8qVzc3NnNVE+62uHPesGdTQK6U+368uKpRr1CqzL9977z28//77mTbg2tztdtO4JDz6Zy+PTlWoS5pHew5kPUCUBr0oV49HiKBuaGWufV66j0nkeNGexLNaP3z4MPULr8Rzzz2XrJ9f+tKXAIz6d3NzM5czp2kPAyWdKqK/V6s75/Unn3ySrGsM7Kfb4uHhYZKBnDuUI3fu3En7J+6dFhYWUpvxPUpQogRLTUDTirCPnjx5kqzR7CPKq+Xl5bT345VlV/dL64o6TXjeOpQVSpRDecxxREvo3t5eci+l7OPeQvdPlPFNkn7UgbUiqeXUussOBoPUL9yza65br/xF62tVz4KwSAUCgUAgEAgEAoFATdS2SFm/Xw1O9GiXlTqS/1PTZQOS+/1+OgXztOnFYtmkYnVQFjhLTcLp6WmKK1D6XNaB5acl7ZVXXkmaPZ78qYF55513ks+tBvWxDtQaWO1P0/ACK+fm5lJ722Dy5eXlVF/V1vB3dfzlm66T1sP6A6+urqb60cqmSaBtvB3HnpIFqJbZxso17QftaUA4rtn+e3t7yUJKrT6/6/V6ubHEtpidnc1offn8Io3StBJd699KecsxZ8v4/vvvJ4sA60sN2ubmZqqn0oZXzYZepw5l2mYrB/v9ftKMKbU5MOwPa81Q8h0vyWOTc4YWmrOzs1yMg2ooqZGkJvZnP/tZogh/+eWXM9cXXnghWac43ti36+vrmUTXLAPLofTbwDCG0abJOA8eiQHnqGpkKbM4fu7du5fqzjiHN954A8AwboPjzlpxrl27lvqQWs7t7e0cRfckVlIdXzb2h23b7XZzgfg7OzuZ+EIAePfddwEAP/7xj3Oadf7+6tWrOep4pX+2wfNliYKLUGaRUnnLdtTveL8XN11mkdJ4KV6L9iiTWKTK5uhgMEhjQvcS3E+wXWih+dVf/VX81m/9FoDRuOQ82dnZySUsVvnklaOu/NAYKbu2q9WGZWLM4UcffZSzRNGaND8/n4gkKDdeffXVdGU9KSPOzs7S3LSWoknIJghvrbX704ODg3Qfy/7FL34RwNByzb8pCzSOnPJMPQu8dzaBojhGtSirFZvyw9Z3Y2Mj45UAjNp8fX099Q33t14y7IuA9VpRojT2gcZ4cU3SFCSAn5S+CYRFKhAIBAKBQCAQCARqYuwYKfU1tlohZcnhqVV9XamxoPZB41bUKqS/bxqen7X6Alvqy48//jid3K1V4Jvf/GbSXnzta18DMDrt9nq9pOWkRpDvAPKJ3aaZ2JZQi46n8QSGmj2W8TLZaBQe5SfbmVrUK1euJIsFGcfU6mfja5SBy9PAFr27adY+1bjY2AC1vFEbTk3RzZs3k9WA2k1aFU9OTtKY8zT404ZqkpVVCBhaMRh/YynO+/1+0pwpqxVhE3teFIuQzhtLu398fJyhnOZnwHAuaRwoMBp/mozU862fxPpOaHyWjRGhnNvd3U1jhW2ujG//93//B2AUF3rnzp0UY8TxRxm4srKS6sf6Ki2vpSnvdDqZWNQ60PHM8cbnLy8vp79Zt3v37qU5T+0yY1JeeeWVJKupZWf9X3vttVQXWlIfPnyYs0h5FN1VoX1jGdPUksJ3aRwvLQTsp3feeQfAMNaL9WX7KDOrtWIvLy8nTbZNMzAYDMa2SCkbqbWuHB8f56xUp6enuXd58RHeuuWx9jXJ1lf2W5WzNqF9v99PY54y+td//dcBAL/zO7+Dr371qwBG7cJ+29nZScyzHG/K7NhE7LhtHyC/R9na2koWWlovPvnkk7RH4n0cPzdv3kwpBmj15Zx77rnn0nzS9CR8hvWGOTw8HMsiVdUCxPG0vr6e1iFa3F9//XUAwJe//OU097kHZMzyzs6Om9Zh2vFEnmXKWhQHg0EuKTdj3B4+fJj61FpLlblZ52LReBunrmXeK2VJp1utVvLu4DxQ7xV+xv7S2GSuF03GWNc+SHk06DY4Ud0DKOC0IpYOXM23/ExpJ
[... remainder of base64-encoded "image/png" output omitted: the rendered figure is a 16x16 grid of the 256 simple_conv_q0 filter kernels plotted by this cell's source below ...]",
+        "text/plain": [
+         "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "num_channels=256\n", + "max_columns = 16\n", + "\n", + "fig, ax = plt.subplots(nrows=num_channels//max_columns, ncols=max_columns)\n", + "\n", + "fig.set_size_inches(15,15)\n", + "for i in range(num_channels):\n", + " v1_k = v1_model.simple_conv_q0.weight[i,:,:,:].numpy().mean(axis=0)\n", + " v1_k = v1_k / np.amax(np.abs(v1_k))/2+0.5\n", + " im_h=ax[i//max_columns, np.mod(i,max_columns)].imshow(v1_k, cmap='gray')\n", + "# ax[i//num_channels, np.mod(i,num_channels)].set_xlim([0, 223])\n", + " im_h.set_clim([0, 1])\n", + " ax[i//max_columns, np.mod(i,max_columns)].set_axis_off()\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 91, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "9.899494936611665\n" + ] + } + ], + "source": [ + "# Nyquist\n", + "\n", + "visual_degrees = 8\n", + "image_size = 224\n", + "\n", + "nyquist_f = 1/(visual_degrees/image_size)/2 / np.sqrt(2)\n", + "\n", + "print(nyquist_f)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 22a25b2aaebb3ace36c8798832e509c0474e7616 Mon Sep 17 00:00:00 2001 From: pellegreene <36171165+pellegreene@users.noreply.github.com> Date: Tue, 9 Jul 2024 20:55:06 -0400 Subject: [PATCH 55/68] Ep/add effnetb1 cutmixpatch augmix robust32 avge4e7 manylayers 324x288 (#946) * Add effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288 * Update requirements.txt * Change PIL to pillow * Update model.py --------- Co-authored-by: Ethan Pellegrini Co-authored-by: deirdre-k <95875723+deirdre-k@users.noreply.github.com> --- .../__init__.py | 9 ++ .../model.py | 140 ++++++++++++++++++ .../requirements.txt | 5 + .../test.py | 8 + 4 files changed, 162 insertions(+) create mode 100644 brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/__init__.py create mode 100644 brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py create mode 100644 brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/requirements.txt create mode 100644 brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/test.py diff --git a/brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/__init__.py b/brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/__init__.py new file mode 100644 index 000000000..0ce5ac672 --- /dev/null +++ b/brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/__init__.py @@ -0,0 +1,9 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + + +model_registry['effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288'] = \ + lambda: 
ModelCommitment(identifier='effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288', + activations_model=get_model('effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288'), + layers=get_layers('effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288')) \ No newline at end of file diff --git a/brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py b/brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py new file mode 100644 index 000000000..e7603dc14 --- /dev/null +++ b/brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/model.py @@ -0,0 +1,140 @@ +import functools +import torch +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from PIL import Image +import numpy as np +import timm +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform +import torch.nn as nn +from albumentations import ( + Compose, Normalize, Resize, CenterCrop +) +from albumentations.pytorch import ToTensorV2 +from brainscore_vision.model_helpers.check_submission import check_models +from brainscore_vision.model_helpers.s3 import load_weight_file + + +image_resize = 324 +image_crop = 288 +norm_mean = [0.485, 0.456, 0.406] +norm_std = [0.229, 0.224, 0.225] +freeze_layers = ['blocks.0.0', 'blocks.0.1', 'blocks.1.0', + 'blocks.1.1', 'blocks.1.2', 'blocks.2.0', + 'blocks.2.1', 'blocks.2.2', 'blocks.3.0'] + + +def custom_image_preprocess(images, **kwargs): + transforms_val = Compose([ + Resize(image_resize, image_resize), + CenterCrop(image_crop, image_crop), + Normalize(mean=norm_mean, std=norm_std, ), + ToTensorV2()]) + + images = [np.array(pillow_image) for pillow_image in images] + images = [transforms_val(image=image)["image"] for image in images] + images = np.stack(images) + + return images + + +def load_preprocess_images_custom(image_filepaths, preprocess_images=custom_image_preprocess, **kwargs): + images = [load_image(image_filepath) for image_filepath in image_filepaths] + images = preprocess_images(images, **kwargs) + return images + + +def load_image(image_filepath): + with Image.open(image_filepath) as pil_image: + if 'L' not in pil_image.mode.upper() and 'A' not in pil_image.mode.upper() \ + and 'P' not in pil_image.mode.upper(): # not binary and not alpha and not palletized + # work around to https://github.com/python-pillow/Pillow/issues/1144, + # see https://stackoverflow.com/a/30376272/2225200 + return pil_image.copy() + else: # make sure potential binary images are in RGB + rgb_image = Image.new("RGB", pil_image.size) + rgb_image.paste(pil_image) + return rgb_image + + +class EffNetBX(nn.Module): + def __init__(self, ): + super().__init__() + self.efnet_model = timm.create_model('tf_efficientnet_b1_ns', pretrained=True) + + def forward(self, x): + x = self.efnet_model(x) + return x + + +def get_model(name): + assert name == 'effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288' + model_tf_efficientnet_b1_ns = EffNetBX() + weights_path = load_weight_file(bucket="brainscore-vision", folder_name="models", + relative_path="effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/tf_efficientnet_b1_ns_robust_cutmixpatchresize_augmix_e4toe7.pth", + version_id="iB0UqbguDpYHD0HRbMt1F1er3c414yWr", + sha1="37f3ac1b14e80cfaa99fa5f412c1e132480ed5b6") + + model_tf_efficientnet_b1_ns.load_state_dict(torch.load(weights_path,map_location=torch.device('cpu'))["model"]) + model = 
model_tf_efficientnet_b1_ns.efnet_model + filter_elems = set(["se", "act", "bn", "conv"]) + layer_list = [layer for layer, _ in model.named_modules() if not any(i in layer for i in filter_elems)] + print(layer_list) + print(len(layer_list)) + + for n, m in model.named_modules(): + if isinstance(m, nn.BatchNorm2d) and any(x in n for x in ["conv_stem"] + freeze_layers) or n == "bn1": + print(f"Freeze {n, m}") + m.eval() + + preprocessing = functools.partial(load_preprocess_images_custom, + preprocess_images=custom_image_preprocess, + ) + + wrapper = PytorchWrapper(identifier='my-model', model=model, preprocessing=preprocessing, batch_size=8) + + wrapper.image_size = image_crop + return wrapper + + +def get_layers(name): + assert name == 'effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288' + return ['blocks', 'blocks.0', 'blocks.0.0', 'blocks.0.1', + 'blocks.1', 'blocks.1.0', 'blocks.1.1', 'blocks.1.2', + 'blocks.2', 'blocks.2.0', 'blocks.2.1', 'blocks.2.2', + 'blocks.3', 'blocks.3.0', 'blocks.3.1', 'blocks.3.2', 'blocks.3.3', + 'blocks.4', 'blocks.4.0', + 'blocks.4.0.conv_pw', 'blocks.4.0.conv_dw', 'blocks.4.0.conv_pwl', 'blocks.4.1', 'blocks.4.1.conv_pw', + 'blocks.4.1.conv_dw', 'blocks.4.1.conv_pwl', 'blocks.4.2', + 'blocks.4.2.conv_pw', 'blocks.4.2.conv_dw', 'blocks.4.2.conv_pwl', 'blocks.4.3', 'blocks.4.3.conv_pw', + 'blocks.4.3.conv_dw', 'blocks.4.3.conv_pwl', 'blocks.5', + 'blocks.5.0', 'blocks.5.0.conv_pw', 'blocks.5.0.conv_dw', 'blocks.5.0.conv_pwl', 'blocks.5.1', + 'blocks.5.1.conv_pw', 'blocks.5.1.conv_dw', 'blocks.5.1.conv_pwl', + 'blocks.5.2', 'blocks.5.2.conv_pw', 'blocks.5.2.conv_dw', 'blocks.5.2.conv_pwl', 'blocks.5.3', + 'blocks.5.3.conv_pw', 'blocks.5.3.conv_dw', 'blocks.5.3.conv_pwl', + 'blocks.5.4', 'blocks.5.4.conv_pw', 'blocks.5.4.conv_dw', 'blocks.5.4.conv_pwl', 'blocks.6', 'blocks.6.0', + 'blocks.6.0.conv_pw', 'blocks.6.0.conv_dw', + 'blocks.6.0.conv_pwl', 'blocks.6.1', 'blocks.6.1.conv_pw', 'blocks.6.1.conv_dw', 'blocks.6.1.conv_pwl', + 'global_pool', 'global_pool.flatten', 'global_pool.pool'] + + +def get_bibtex(model_identifier): + return """@InProceedings{pmlr-v97-tan19a, + title = {{E}fficient{N}et: Rethinking Model Scaling for Convolutional Neural Networks}, + author = {Tan, Mingxing and Le, Quoc}, + booktitle = {Proceedings of the 36th International Conference on Machine Learning}, + pages = {6105--6114}, + year = {2019}, + editor = {Chaudhuri, Kamalika and Salakhutdinov, Ruslan}, + volume = {97}, + series = {Proceedings of Machine Learning Research}, + month = {09--15 Jun}, + publisher = {PMLR}, + pdf = {http://proceedings.mlr.press/v97/tan19a/tan19a.pdf}, + url = {https://proceedings.mlr.press/v97/tan19a.html}, + abstract = {Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are given. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on MobileNets and ResNet. To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. 
In particular, our EfficientNet-B7 achieves stateof-the-art 84.4% top-1 / 97.1% top-5 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet (Huang et al., 2018). Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flower (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters.} + }""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/requirements.txt b/brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/requirements.txt new file mode 100644 index 000000000..013e2715d --- /dev/null +++ b/brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/requirements.txt @@ -0,0 +1,5 @@ +numpy +torch +timm +albumentations +pillow diff --git a/brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/test.py b/brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/test.py new file mode 100644 index 000000000..ee89b0bc4 --- /dev/null +++ b/brainscore_vision/models/effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288') + assert model.identifier == 'effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288' \ No newline at end of file From 0dd3bc8e158d8b57d57faa9e9a681ded5955e86b Mon Sep 17 00:00:00 2001 From: Katherine Fairchild Date: Tue, 9 Jul 2024 23:29:25 -0400 Subject: [PATCH 56/68] brain-score.org submission (user:550) | (public:False) (#1010) * add my_custom_model to models * rename directory * add renamed directory --------- Co-authored-by: AutoJenkins Co-authored-by: samwinebrake --- .../models/Soumyadeep_inf_1/__init__.py | 5 +++++ .../model.py | 17 ++++++++++------- .../setup.py | 0 .../test.py | 0 .../models/my_custom_model/__init__.py | 5 ----- 5 files changed, 15 insertions(+), 12 deletions(-) create mode 100755 brainscore_vision/models/Soumyadeep_inf_1/__init__.py rename brainscore_vision/models/{my_custom_model => Soumyadeep_inf_1}/model.py (74%) rename brainscore_vision/models/{my_custom_model => Soumyadeep_inf_1}/setup.py (100%) rename brainscore_vision/models/{my_custom_model => Soumyadeep_inf_1}/test.py (100%) delete mode 100755 brainscore_vision/models/my_custom_model/__init__.py diff --git a/brainscore_vision/models/Soumyadeep_inf_1/__init__.py b/brainscore_vision/models/Soumyadeep_inf_1/__init__.py new file mode 100755 index 000000000..d1f3ba487 --- /dev/null +++ b/brainscore_vision/models/Soumyadeep_inf_1/__init__.py @@ -0,0 +1,5 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['Soumyadeep_inf_1'] = lambda: ModelCommitment(identifier='Soumyadeep_inf_1', activations_model=get_model('Soumyadeep_inf_1'), layers=get_layers('Soumyadeep_inf_1')) diff --git a/brainscore_vision/models/my_custom_model/model.py b/brainscore_vision/models/Soumyadeep_inf_1/model.py similarity index 74% rename from brainscore_vision/models/my_custom_model/model.py rename to brainscore_vision/models/Soumyadeep_inf_1/model.py index 
c2fd63ad8..f2e0145aa 100755 --- a/brainscore_vision/models/my_custom_model/model.py +++ b/brainscore_vision/models/Soumyadeep_inf_1/model.py @@ -17,31 +17,34 @@ def get_model_list(): - return ['fast_2px_step2_eps2_repeat1_trial1_model_best'] + return ['Soumyadeep_inf_1'] def get_model(name): trained_model = models.__dict__['resnet50']() trained_model = torch.nn.DataParallel(trained_model) - url = "https://drive.google.com/uc?id=1kNgOmtSrCQnyINVGw_l9vishwaNeqGN4" - output = "fast_2px_step2_eps2_repeat1_trial1_model_best.pth.tar" + #url = "https://drive.google.com/uc?id=1kNgOmtSrCQnyINVGw_l9vishwaNeqGN4" + # fast_2px_step2_eps2_repeat1_trial1_model_best.pth.tar + + url = "https://drive.google.com/uc?id=1uSpR1uVQkyW6wUY9DM6XbXNbhdW89cOc" + output = "Soumyadeep_inf_1.pth.tar" gdown.download(url, output) - checkpoint = torch.load("fast_2px_step2_eps2_repeat1_trial1_model_best.pth.tar", map_location=torch.device('cpu')) + checkpoint = torch.load("Soumyadeep_inf_1.pth.tar", map_location=torch.device('cpu')) trained_model.load_state_dict(checkpoint['state_dict']) trained_model = trained_model.module - assert name == 'fast_2px_step2_eps2_repeat1_trial1_model_best' + assert name == 'Soumyadeep_inf_1' preprocessing = functools.partial(load_preprocess_images, image_size=224) # Wrap the model in PytorchWrapper directly - activations_model = PytorchWrapper(identifier='fast_2px_step2_eps2_repeat1_trial1_model_best', model=trained_model, preprocessing=preprocessing) + activations_model = PytorchWrapper(identifier='Soumyadeep_inf_1', model=trained_model, preprocessing=preprocessing) return activations_model def get_layers(name): - assert name == 'fast_2px_step2_eps2_repeat1_trial1_model_best' + assert name == 'Soumyadeep_inf_1' return ['layer1.0.conv3', 'layer1.1.conv3', 'layer1.2.conv3', 'layer2.0.conv3', 'layer2.1.conv3', 'layer2.2.conv3', 'layer2.3.conv3', 'layer3.0.conv3', 'layer3.1.conv3', 'layer3.2.conv3', 'layer3.3.conv3', 'layer3.4.conv3', 'layer3.5.conv3', diff --git a/brainscore_vision/models/my_custom_model/setup.py b/brainscore_vision/models/Soumyadeep_inf_1/setup.py similarity index 100% rename from brainscore_vision/models/my_custom_model/setup.py rename to brainscore_vision/models/Soumyadeep_inf_1/setup.py diff --git a/brainscore_vision/models/my_custom_model/test.py b/brainscore_vision/models/Soumyadeep_inf_1/test.py similarity index 100% rename from brainscore_vision/models/my_custom_model/test.py rename to brainscore_vision/models/Soumyadeep_inf_1/test.py diff --git a/brainscore_vision/models/my_custom_model/__init__.py b/brainscore_vision/models/my_custom_model/__init__.py deleted file mode 100755 index f65aac9d8..000000000 --- a/brainscore_vision/models/my_custom_model/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from brainscore_vision import model_registry -from brainscore_vision.model_helpers.brain_transformation import ModelCommitment -from .model import get_model, get_layers - -model_registry['fast_2px_step2_eps2_repeat1_trial1_model_best'] = lambda: ModelCommitment(identifier='fast_2px_step2_eps2_repeat1_trial1_model_best', activations_model=get_model('fast_2px_step2_eps2_repeat1_trial1_model_best'), layers=get_layers('fast_2px_step2_eps2_repeat1_trial1_model_best')) From 11df9915207f269aea6f76897a0fb15ee179adcc Mon Sep 17 00:00:00 2001 From: Michael Ferguson Date: Wed, 10 Jul 2024 04:40:40 -0400 Subject: [PATCH 57/68] Add Ferguson2024 benchmark (competition, honorable mention) (#618) * Initial commit of Ferguson2024 benchmark (just data for now) * netCDF case type error fixed * 
added S3 data to init * data tests are done * Initial commit of Ferguson2024 benchmark (just data for now) * netCDF case type error fixed * added S3 data to init * data tests are done * ferguson commit * add value_delta metric (bounded) * finalized value_delta metric using sigmoid and tests * Benchmark round 2 * added more benchmark code * added precomputed ceiling for all 14 stimuli * small changes to make code better * generalized integral method * Added lambdas to init * initial same-different decoder * Added integral boostrap methods to helpers * Data Packaging updates * add fitting stimuli * add merged assembly training data * add decoder and generate first model scores * removed old test * initial test.py commit * add benchmark, metric and data requirements.txt * First half of tests * finalize tests * fixed import * retrigger test * PR comments --- .../benchmarks/ferguson2024/__init__.py | 24 ++ .../benchmarks/ferguson2024/benchmark.py | 210 +++++++++++++++ .../ferguson2024/helpers/helpers.py | 251 ++++++++++++++++++ .../benchmarks/ferguson2024/requirements.txt | 5 + .../benchmarks/ferguson2024/test.py | 112 ++++++++ .../data/ferguson2024/__init__.py | 2 +- .../data/ferguson2024/requirements.txt | 2 + .../metrics/value_delta/metric.py | 1 + .../metrics/value_delta/requirements.txt | 1 + .../brain_transformation/behavior.py | 1 - 10 files changed, 607 insertions(+), 2 deletions(-) create mode 100644 brainscore_vision/benchmarks/ferguson2024/__init__.py create mode 100644 brainscore_vision/benchmarks/ferguson2024/benchmark.py create mode 100644 brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py create mode 100644 brainscore_vision/benchmarks/ferguson2024/requirements.txt create mode 100644 brainscore_vision/benchmarks/ferguson2024/test.py create mode 100644 brainscore_vision/data/ferguson2024/requirements.txt create mode 100644 brainscore_vision/metrics/value_delta/requirements.txt diff --git a/brainscore_vision/benchmarks/ferguson2024/__init__.py b/brainscore_vision/benchmarks/ferguson2024/__init__.py new file mode 100644 index 000000000..e0cdeb578 --- /dev/null +++ b/brainscore_vision/benchmarks/ferguson2024/__init__.py @@ -0,0 +1,24 @@ +from brainscore_vision import benchmark_registry +from brainscore_vision.benchmarks.ferguson2024.benchmark import Ferguson2024ValueDelta + +DATASETS = ['circle_line', 'color', 'convergence', 'eighth', + 'gray_easy', 'gray_hard', 'half', 'juncture', + 'lle', 'llh', 'quarter', 'round_f', + 'round_v', 'tilted_line'] + +benchmark_registry['Ferguson2024circle_line-value_delta'] = lambda: Ferguson2024ValueDelta("circle_line") +benchmark_registry['Ferguson2024color-value_delta'] = lambda: Ferguson2024ValueDelta("color") +benchmark_registry['Ferguson2024convergence-value_delta'] = lambda: Ferguson2024ValueDelta("convergence") +benchmark_registry['Ferguson2024eighth-value_delta'] = lambda: Ferguson2024ValueDelta("eighth") +benchmark_registry['Ferguson2024gray_easy-value_delta'] = lambda: Ferguson2024ValueDelta("gray_easy") +benchmark_registry['Ferguson2024gray_hard-value_delta'] = lambda: Ferguson2024ValueDelta("gray_hard") +benchmark_registry['Ferguson2024half-value_delta'] = lambda: Ferguson2024ValueDelta("half") +benchmark_registry['Ferguson2024juncture-value_delta'] = lambda: Ferguson2024ValueDelta("juncture") +benchmark_registry['Ferguson2024lle-value_delta'] = lambda: Ferguson2024ValueDelta("lle") +benchmark_registry['Ferguson2024llh-value_delta'] = lambda: Ferguson2024ValueDelta("llh") +benchmark_registry['Ferguson2024quarter-value_delta'] = 
lambda: Ferguson2024ValueDelta("quarter") +benchmark_registry['Ferguson2024round_f-value_delta'] = lambda: Ferguson2024ValueDelta("round_f") +benchmark_registry['Ferguson2024round_v-value_delta'] = lambda: Ferguson2024ValueDelta("round_v") +benchmark_registry['Ferguson2024tilted_line-value_delta'] = lambda: Ferguson2024ValueDelta("tilted_line") + + diff --git a/brainscore_vision/benchmarks/ferguson2024/benchmark.py b/brainscore_vision/benchmarks/ferguson2024/benchmark.py new file mode 100644 index 000000000..512e58f9c --- /dev/null +++ b/brainscore_vision/benchmarks/ferguson2024/benchmark.py @@ -0,0 +1,210 @@ +import numpy as np +from brainscore_core import Metric +from brainio.stimuli import StimulusSet +from tqdm import tqdm +from typing import Dict +import xarray as xr +from brainscore_vision import load_dataset, load_stimulus_set +from brainio.assemblies import BehavioralAssembly +from brainscore_vision.benchmark_helpers.screen import place_on_screen +import pandas as pd +from brainscore_vision.benchmarks import BenchmarkBase +from brainscore_vision.metrics import Score +from brainscore_vision.metrics.value_delta import ValueDelta +from brainscore_vision.model_interface import BrainModel +from .helpers.helpers import generate_summary_df, calculate_integral, HUMAN_INTEGRAL_ERRORS, LAPSE_RATES, \ + split_dataframe, boostrap_integral + +BIBTEX = """ + @misc{ferguson_ngo_lee_dicarlo_schrimpf_2024, + title={How Well is Visual Search Asymmetry predicted by a Binary-Choice, Rapid, Accuracy-based Visual-search, Oddball-detection (BRAVO) task?}, + url={osf.io/5ba3n}, + DOI={10.17605/OSF.IO/5BA3N}, + publisher={OSF}, + author={Ferguson, Michael E, Jr and Ngo, Jerry and Lee, Michael and DiCarlo, James and Schrimpf, Martin}, + year={2024}, + month={Jun} +} +""" + +# These ceilings were precomputed to save time in benchmark execution +PRECOMPUTED_CEILINGS = {'circle_line': [0.883, 0.078], 'color': [0.897, 0.072], 'convergence': [0.862, 0.098], + 'eighth': [0.852, 0.107], 'gray_easy': [0.907, 0.064], 'gray_hard': [0.863, 0.099], + 'half': [0.898, 0.077], 'juncture': [0.767, 0.141], 'lle': [0.831, 0.116], 'llh': [0.812, 0.123], + 'quarter': [0.876, 0.087], 'round_f': [0.874, 0.088], 'round_v': [0.853, 0.101], + 'tilted_line': [0.912, 0.064]} + + +for dataset in PRECOMPUTED_CEILINGS.keys(): + identifier = f"Ferguson2024{dataset}ValueDelta" + globals()[identifier] = lambda dataset=dataset: _Ferguson2024ValueDelta(dataset) + + +class _Ferguson2024ValueDelta(BenchmarkBase): + def __init__(self, experiment, precompute_ceiling=True): + self._experiment = experiment + self._precompute_ceiling = precompute_ceiling + self._metric = ValueDelta(scale=0.75) # 0.75 chosen after calibrating with ceiling + self._fitting_stimuli = gather_fitting_stimuli(combine_all=False, experiment=self._experiment) + self._assembly = load_dataset(f'Ferguson2024_{self._experiment}') + self._visual_degrees = 8 + self._number_of_trials = 3 + self._ceiling = calculate_ceiling(self._precompute_ceiling, self._experiment, self._assembly, self._metric, num_loops=500) + super(_Ferguson2024ValueDelta, self).__init__(identifier="Ferguson2024", version=1, ceiling_func=self._ceiling, + parent='behavior', bibtex=BIBTEX) + + def __call__(self, candidate: BrainModel) -> Score: + + # add truth labels to stimuli and training data + self._assembly.stimulus_set["image_label"] = np.where(self._assembly.stimulus_set["image_number"] % 2 == 0, "oddball", "same") + self._fitting_stimuli["image_label"] = np.where(self._fitting_stimuli["image_number"] % 2 
== 0, "oddball", "same") + + # fit logistic binary decoder and perform task: + fitting_stimuli = place_on_screen(self._fitting_stimuli, target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees) + candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli) + stimulus_set = place_on_screen(self._assembly.stimulus_set, target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees) + model_labels_raw = candidate.look_at(stimulus_set, number_of_trials=self._number_of_trials) + model_labels = process_model_choices(model_labels_raw) + human_integral = get_integral_data(self._assembly, self._experiment)['integral'] + model_integral = get_integral_data(model_labels, self._experiment)['integral'] + raw_score = self._metric(model_integral, human_integral) + ceiling = self._ceiling + score = Score(min(max(raw_score / ceiling, 0), 1)) # ensure ceiled score is between 0 and 1 + score.attrs['raw'] = raw_score + score.attrs['ceiling'] = ceiling + return score + + +def Ferguson2024ValueDelta(experiment): + return _Ferguson2024ValueDelta(experiment) + + +def calculate_ceiling(precompute_ceiling, dataset: str, assembly: BehavioralAssembly, metric: Metric, + num_loops: int) -> Score: + """ + - A Version of split-half reliability, in which the data is split randomly in half + and the metric is called on those two halves. + + :param precompute_ceiling: True if using precomputed ceilings. Should almost always be True. + :param dataset: str, the prefix of the experiment subtype, ex: "tilted_line" or "lle" + :param assembly: the human behavioral data to look at + :param metric: of type Metric, used to calculate the score between two subjects + :return: Score object consisting of the score between two halves of human data + :param num_loops: int. number of times the score is calculated. Final score is the average of all of these. + """ + if precompute_ceiling: + score = Score(PRECOMPUTED_CEILINGS[dataset][0]) + score.attrs['error'] = PRECOMPUTED_CEILINGS[dataset][1] + score.attrs[Score.RAW_VALUES_KEY] = [PRECOMPUTED_CEILINGS[dataset][0], PRECOMPUTED_CEILINGS[dataset][1]] + return score + else: + scores = [] + for i in tqdm(range(num_loops)): + half_1, half_2 = split_dataframe(assembly, seed=i) + half_1_score = get_integral_data(half_1, dataset)["integral"] + half_2_score = get_integral_data(half_2, dataset)["integral"] + score = metric(half_1_score, half_2_score) + scores.append(score) + + score = Score(np.mean(scores)) + scores = np.array(scores, dtype=float) + score.attrs['error'] = np.std(scores) + score.attrs[Score.RAW_VALUES_KEY] = [np.mean(scores), np.std(scores)] + print(f"Dataset: {dataset}, score: {np.mean(scores)}, error: {score.attrs['error']}") + return score + + +def get_integral_data(assembly: BehavioralAssembly, experiment: str, precompute_boostrap=True) -> Dict: + """ + - Generates summary data for the experiment and calculates the integral of delta line + + :param assembly: the human behavioral data to look at + :param experiment: str, the prefix of the experiment subtype, ex: "tilted_line" or "lle" + :param precompute_boostrap: True if using precomputed integral errors, else manually compute (Slow!) 
+    :return: dict containing the calculated human integral and its bootstrapped (precomputed) error
+    """
+    lapse_rate = LAPSE_RATES[experiment]
+    blue_data = generate_summary_df(assembly, lapse_rate, "first")
+    orange_data = generate_summary_df(assembly, lapse_rate, "second")
+    integral = calculate_integral(blue_data, orange_data)
+    integral_error = HUMAN_INTEGRAL_ERRORS[experiment] if precompute_boostrap else \
+        boostrap_integral(blue_data, orange_data)["integral_std"]
+    return dict(zip(["integral", "integral_error"], [integral, integral_error]))
+
+
+def gather_fitting_stimuli(combine_all=True, experiment="") -> StimulusSet:
+    """
+    Combines all the training stimuli into one merged stimulus_set, or returns the selected set for the experiment
+
+    :param combine_all: True if you want to collapse all 14 stimuli pairs into 1 stimulus set
+    :param experiment: only used if combine_all is False; specifies which experiment's stimuli you want
+    :return: merged StimulusSet of all 14 stimulus sets' training data, or a StimulusSet for that experiment only
+    """
+
+    if combine_all:
+        all_stimulus_sets = []
+        for experiment in PRECOMPUTED_CEILINGS.keys():
+            stimulus_set = load_stimulus_set(f"Ferguson2024_{experiment}_training_stimuli")
+            all_stimulus_sets.append(stimulus_set)
+        merged_dataframe = pd.concat(all_stimulus_sets, axis=0, ignore_index=True)
+        merged_dataframe.name = "Ferguson2024_merged_training_stimuli"
+        merged_dataframe.identifier = "Ferguson2024_merged_training_stimuli"
+        merged_stimulus_set = StimulusSet(merged_dataframe)
+        merged_stimulus_set.identifier = "Ferguson2024_merged_training_stimuli"
+        return merged_stimulus_set
+    else:
+        return load_stimulus_set(f"Ferguson2024_{experiment}_training_stimuli")
+
+
+def process_model_choices(raw_model_labels: BehavioralAssembly) -> BehavioralAssembly:
+    """
+    Takes in a raw Assembly and applies a softmax and threshold to get a string label for a class.
Also + builds the model's assembly to resemble a humans by adding fields (trial_type, num_distractors, etc) + + :param raw_model_labels: a BehavioralAssembly that has two raw values corresponding to class choices + :return: new assembly with an added dim, "model_choice" based on the raw values + """ + distractor_mapping = { + 1.0: [0, 1, 6, 7, 12, 13, 18, 19], + 5.0: [2, 3, 8, 9, 14, 15, 20, 21] + } + distractor_lookup = {image_num: str(distractor) for distractor, images in distractor_mapping.items() for image_num + in images} + + def num_distractors(image_num): + return distractor_lookup.get(image_num, "11.0") # default to 11.0 if not found, which should never happen + + def softmax(x): + e_x = np.exp(x - np.max(x, axis=1, keepdims=True)) + return e_x / e_x.sum(axis=1, keepdims=True) + + def is_even(image_num): + return 1 if image_num % 2 == 0 else 0 + + def is_equal(label, image_type): + return 1 if label == image_type else 0 + + softmax_values = softmax(raw_model_labels.values) + labels = np.where(softmax_values[:, 0] > 0.5, 1, 0) + labels_string = np.where(softmax_values[:, 0] > 0.5, "oddball", "same") + model_choices = xr.DataArray(labels, dims=["presentation"], coords={"presentation": raw_model_labels.coords["presentation"]}) + model_choices = model_choices.assign_coords(trial_type=('presentation', np.array(['normal'] * 48))) + model_choices = model_choices.assign_coords(participant_id=('presentation', np.array(['model'] * 48))) + distractor_nums = [num_distractors(image_num) for image_num in model_choices.coords['image_number'].values] + model_choices = model_choices.assign_coords(distractor_nums=('presentation', distractor_nums)) + target_present = [is_even(image_num) for image_num in model_choices.coords['image_number'].values] + model_choices = model_choices.assign_coords(target_present=('presentation', target_present)) + model_choices = model_choices.assign_coords(labels_string=('presentation', labels_string)) + correct = [ + is_equal(label, image_type) + for label, image_type in zip( + model_choices.coords['labels_string'].values, + model_choices.coords['image_label'].values + ) + ] + model_choices = model_choices.assign_coords(correct=('presentation', correct)) + return BehavioralAssembly(model_choices) + + diff --git a/brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py b/brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py new file mode 100644 index 000000000..c28f390c0 --- /dev/null +++ b/brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py @@ -0,0 +1,251 @@ +import pandas as pd +import numpy as np +from brainio.assemblies import BehavioralAssembly +import sympy as sp +from pandas import DataFrame +from tqdm import tqdm +import statistics +from typing import Dict + +# number of distractors in the experiment +DISTRACTOR_NUMS = ["1.0", "5.0", "11.0"] + +# These are precomputed subject average lapse rates +LAPSE_RATES = {'circle_line': 0.0335, 'color': 0.0578, 'convergence': 0.0372, 'eighth': 0.05556, + 'gray_easy': 0.0414, 'gray_hard': 0.02305, 'half': 0.0637, 'juncture': 0.3715, + 'lle': 0.0573, 'llh': 0.0402, 'quarter': 0.0534, 'round_f': 0.08196, + 'round_v': 0.0561, 'tilted_line': 0.04986} + +# These are precomputed integral errors, computed by bootstrapping (see below) +HUMAN_INTEGRAL_ERRORS = {'circle_line': 0.3078, 'color': 0.362, 'convergence': 0.2773, 'eighth': 0.278, + 'gray_easy': 0.309, 'gray_hard': 0.4246, 'half': 0.3661, 'juncture': 0.2198, + 'lle': 0.209, 'llh': 0.195, 'quarter': 0.2959, 'round_f': 0.344, + 'round_v': 0.2794, 
'tilted_line': 0.3573}
+
+
+def get_adjusted_rate(acc: float, lapse_rate: float, n_way: int = 2) -> float:
+    """
+    - Adjusts the raw accuracy by a lapse rate correction
+
+    :param acc: float, the raw accuracy
+    :param lapse_rate: a precomputed float defined above that represents avg. subject lapse rate in experiment
+    :param n_way: int, the number of response alternatives to divide by (default 2)
+
+    :return: float, the lapse rate-adjusted accuracy
+    """
+    return (acc - lapse_rate * (1.0 / n_way)) / (1 - lapse_rate)
+
+
+def sem(array: BehavioralAssembly) -> float:
+    """
+    - Get the standard error of the mean (SEM) of an assembly
+
+    :param array: the assembly to look at
+    :return: float, the SEM of that array
+    """
+    array = np.array(array)
+    return np.std(array) / np.sqrt(len(array))
+
+
+def get_line(point_1: tuple, point_2: tuple) -> str:
+    """
+    - Calculate the equation of a line from two points
+
+    :param point_1: tuple in the form (x, y) of first point
+    :param point_2: tuple in the form (x, y) of second point
+    :return: str, equation of a line in the form y = mx + b
+    """
+    x1, y1 = point_1
+    x2, y2 = point_2
+    m = (y2 - y1) / (x2 - x1)
+    c = y1 - m * x1
+    equation = f"{m:.10f}*x + {c:.10f}"
+    return equation
+
+
+def integrate_line(equation: str, lower: float, upper: float) -> float:
+    """
+    - Integrates an equation.
+
+    :param equation: a string representing the equation of the line to integrate
+    :param lower: float, the lower bound of the definite integral
+    :param upper: float, the upper bound of the definite integral
+    :return: float, representing the integral of that line
+    """
+    x = sp.symbols('x')
+    integral_definite = sp.integrate(equation, (x, lower, upper))
+    return integral_definite
+
+
+def get_averages(df_blue: DataFrame, df_orange: DataFrame, num_distractors: str) -> (float, float):
+    """
+    - Gets the per-distractor averages for a block
+
+    :param df_blue: the first (blue) block of data (target on a field of distractors)
+    :param df_orange: the second (orange) block of data (distractor on a field of targets)
+    :param num_distractors: string of a float representing how many distractors to look at
+    :return: tuple of the blue and orange accuracy averages for that distractor number
+    """
+    blue_df = df_blue[df_blue["distractor_nums"] == num_distractors]
+    orange_df = df_orange[df_orange["distractor_nums"] == num_distractors]
+    blue_val_avg = blue_df["correct"].values.mean()
+    orange_val_avg = orange_df["correct"].values.mean()
+    return blue_val_avg, orange_val_avg
+
+
+def calculate_integral(df_blue: DataFrame, df_orange: DataFrame) -> float:
+    """
+    - Manually calculates the integral under the delta line
+
+    :param df_blue: the first (blue) block of data (target on a field of distractors)
+    :param df_orange: the second (orange) block of data (distractor on a field of targets)
+    :return: float representing the integral of the delta line
+    """
+    blue_low_avg, orange_low_avg = get_averages(df_blue, df_orange, "1.0")
+    blue_mid_avg, orange_mid_avg = get_averages(df_blue, df_orange, "5.0")
+    blue_high_avg, orange_high_avg = get_averages(df_blue, df_orange, "11.0")
+
+    # compute deltas
+    low_delta = orange_low_avg - blue_low_avg
+    mid_delta = orange_mid_avg - blue_mid_avg
+    high_delta = orange_high_avg - blue_high_avg
+
+    # get equation of line through 1-5
+    point_1 = (1, low_delta)
+    point_2 = (5, mid_delta)
+    equation = get_line(point_1, point_2)
+    first_half = integrate_line(equation, 1, 5)
+
+    # get line 5-11 equation and integrate
+    point_3 = (11, high_delta)
+    equation_2 = get_line(point_2, point_3)
+    second_half = integrate_line(equation_2, 5, 11)
+
+    # add up integral
+    total_integral = round(first_half + second_half, 4)
+    return total_integral
+
+
+def calculate_accuracy(df: BehavioralAssembly, lapse_rate: float) -> float:
+    """
+    - Calculates a per-subject lapse rate-corrected accuracy for an assembly.
+    - Subject accuracy is averaged over all images with a certain distractor size and repetition coords (i.e. these
+      coords are mixed together and the accuracy is calculated over this merged assembly).
+
+    :param df: BehavioralAssembly that contains the experimental data
+    :param lapse_rate: a precomputed float defined above that represents avg. subject lapse rate in experiment
+    :return: float representing the adjusted (for lapse rate) accuracy of that subject
+    """
+    accuracy = len(df[df["correct"] == True]) / len(df)
+    adjusted_accuracy = get_adjusted_rate(accuracy, lapse_rate)
+    return adjusted_accuracy
+
+
+def generate_summary_df(assembly: BehavioralAssembly, lapse_rate: float, block: str) -> pd.DataFrame:
+    """
+    - Takes in raw assembly data and outputs a dataframe of summary statistics, used for the benchmark.
+    - For each distractor size, accuracy is calculated per subject.
+
+    :param assembly: the data in the form of a BehavioralAssembly
+    :param lapse_rate: a precomputed float defined above that represents avg. subject lapse rate in experiment
+    :param block: str that defines which block to look at: "first" (blue) or "second" (orange)
+    :return: a DataFrame object that contains the needed summary data
+    """
+    filtered_data = assembly[(assembly["trial_type"] == "normal") & (assembly["block"] == block)]
+    participants = list(set(filtered_data['participant_id'].values))
+
+    summary_data = []
+    for subject in participants:
+        subject_data = filtered_data[filtered_data["participant_id"] == subject]
+        for distractor_num in DISTRACTOR_NUMS:
+            distractor_df = subject_data[subject_data["distractor_nums"] == str(distractor_num)]
+            if len(distractor_df) == 0:
+                continue
+            adjusted_acc = calculate_accuracy(distractor_df, lapse_rate)
+            summary_data.append({
+                'distractor_nums': distractor_num,
+                'participant_id': subject,
+                'correct': adjusted_acc
+            })
+    summary_df = pd.DataFrame(summary_data, columns=['distractor_nums', 'participant_id', 'correct'])
+    return summary_df
+
+
+def split_dataframe(df: BehavioralAssembly, seed: int) -> (BehavioralAssembly, BehavioralAssembly):
+    """
+    - Takes in one DF and splits it into two, randomly, on the presentation dim
+
+    :param df: The DataFrame (assembly) to split
+    :param seed: a seed for the numpy rng
+    :return: Two DataFrames (assemblies)
+    """
+    if seed is not None:
+        np.random.seed(seed)
+    shuffled_indices = np.random.permutation(df.presentation.size)
+    half = len(shuffled_indices) // 2
+    indices_1 = shuffled_indices[:half]
+    indices_2 = shuffled_indices[half:]
+    dataarray_1 = df.isel(presentation=indices_1)
+    dataarray_2 = df.isel(presentation=indices_2)
+    return dataarray_1, dataarray_2
+
+
+def get_acc_delta(df_blue: DataFrame, df_orange: DataFrame, num_dist: str) -> float:
+    """
+    Helper function for bootstrapping. Calculates an accuracy delta on a specific subject/distractor.
+
+    :param df_blue: DataFrame, the first (blue) block of data (target on a field of distractors)
+    :param df_orange: DataFrame, the second (orange) block of data (distractor on a field of targets)
+    :param num_dist: string, number of distractors
+    :return: float representing the requested accuracy delta.
+ """ + d_blue = df_blue[df_blue["distractor_nums"] == num_dist] + d_orange = df_orange[df_orange["distractor_nums"] == num_dist] + sampled_blue = d_blue.sample(n=1, replace=True) + sampled_orange = d_orange.sample(n=1, replace=True) + accuracy_delta = sampled_blue["correct"].values[0] - sampled_orange["correct"].values[0] + return accuracy_delta + + +def boostrap_integral(df_blue: DataFrame, df_orange: DataFrame, num_loops: int = 500) -> Dict: + """ + Computes an error (std) on integral calculation by bootstrapping the integral via slices of subjects. + + :param df_blue: DataFrame, the first (blue) block of data (target on a field of distractors) + :param df_orange: DataFrame, the second (orange) block of data (distractor on a field of targets) + :param num_loops: int, number of times the boostrap will run (and thus take the average) + :return: Dict of values {bootstrapped_integral, bootstrapped_integral_error) + """ + num_subjects = len(set(df_blue["participant_id"])) + integral_list = [] + for i in tqdm(range(num_loops)): + accuracy_delta_lows = [] + accuracy_delta_mids = [] + accuracy_delta_highs = [] + for j in range(num_subjects): + accuracy_delta_lows.append(get_acc_delta(df_blue, df_orange, num_dist="1.0")) # get low distractor case + accuracy_delta_mids.append(get_acc_delta(df_blue, df_orange, num_dist="5.0")) # get mid distractor case + accuracy_delta_highs.append(get_acc_delta(df_blue, df_orange, num_dist="11.0")) # get high distractor case + average_low_delta = statistics.mean(accuracy_delta_highs) + average_mid_delta = statistics.mean(accuracy_delta_mids) + average_high_delta = statistics.mean(accuracy_delta_lows) + + # get equation for line through points 1 - 5 and integrate: + point_1 = (1, average_low_delta) + point_2 = (5, average_mid_delta) + equation = get_line(point_1, point_2) + first_half = integrate_line(equation, 1, 5) + + # get line 5-11 equation and integrate + point_3 = (11, average_high_delta) + equation_2 = get_line(point_2, point_3) + second_half = integrate_line(equation_2, 5, 11) + + total_integral = first_half + second_half + integral_list.append(total_integral) + data_array = np.array(integral_list, dtype=float) + integral_mean = -np.mean(data_array) + integral_std = np.std(data_array) + + return {"bootstrap_integral_mean": integral_mean, "integral_std": integral_std} diff --git a/brainscore_vision/benchmarks/ferguson2024/requirements.txt b/brainscore_vision/benchmarks/ferguson2024/requirements.txt new file mode 100644 index 000000000..0c672a783 --- /dev/null +++ b/brainscore_vision/benchmarks/ferguson2024/requirements.txt @@ -0,0 +1,5 @@ +pandas +sympy +tqdm +statistics +xarray diff --git a/brainscore_vision/benchmarks/ferguson2024/test.py b/brainscore_vision/benchmarks/ferguson2024/test.py new file mode 100644 index 000000000..906324c31 --- /dev/null +++ b/brainscore_vision/benchmarks/ferguson2024/test.py @@ -0,0 +1,112 @@ +from pathlib import Path +import pytest +from pytest import approx +from brainio.assemblies import BehavioralAssembly +from brainscore_vision import benchmark_registry, load_benchmark +from brainscore_vision.benchmark_helpers import PrecomputedFeatures +from brainscore_vision.data_helpers import s3 + + +class TestExist: + @pytest.mark.parametrize('benchmark', [ + 'Ferguson2024circle_line-value_delta', + 'Ferguson2024color-value_delta', + 'Ferguson2024convergence-value_delta', + 'Ferguson2024eighth-value_delta', + 'Ferguson2024gray_easy-value_delta', + 'Ferguson2024gray_hard-value_delta', + 'Ferguson2024half-value_delta', + 
'Ferguson2024juncture-value_delta', + 'Ferguson2024lle-value_delta', + 'Ferguson2024llh-value_delta', + 'Ferguson2024quarter-value_delta', + 'Ferguson2024round_f-value_delta', + 'Ferguson2024round_v-value_delta', + 'Ferguson2024tilted_line-value_delta', + ]) + def test_benchmark_registry(self, benchmark): + assert benchmark in benchmark_registry + + +class TestBehavioral: + @pytest.mark.private_access + @pytest.mark.parametrize('benchmark, expected_ceiling', [ + ('Ferguson2024circle_line-value_delta', approx(0.883, abs=0.001)), + ('Ferguson2024color-value_delta', approx(0.897, abs=0.001)), + ('Ferguson2024convergence-value_delta', approx(0.862, abs=0.001)), + ('Ferguson2024eighth-value_delta', approx(0.852, abs=0.001)), + ('Ferguson2024gray_easy-value_delta', approx(0.907, abs=0.001)), + ('Ferguson2024gray_hard-value_delta', approx(0.863, abs=0.001)), + ('Ferguson2024half-value_delta', approx(0.898, abs=0.001)), + ('Ferguson2024juncture-value_delta', approx(0.767, abs=0.001)), + ('Ferguson2024lle-value_delta', approx(0.831, abs=0.001)), + ('Ferguson2024llh-value_delta', approx(0.812, abs=0.001)), + ('Ferguson2024quarter-value_delta', approx(0.876, abs=0.001)), + ('Ferguson2024round_f-value_delta', approx(0.874, abs=0.001)), + ('Ferguson2024round_v-value_delta', approx(0.853, abs=0.001)), + ('Ferguson2024tilted_line-value_delta', approx(0.912, abs=0.001)), + ]) + def test_benchmark_ceiling(self, benchmark, expected_ceiling): + benchmark = load_benchmark(benchmark) + ceiling = benchmark._ceiling + assert ceiling == expected_ceiling + + @pytest.mark.private_access + @pytest.mark.parametrize('benchmark, expected_raw_score', [ + ('Ferguson2024circle_line-value_delta', approx(0.143, abs=0.001)), + ('Ferguson2024color-value_delta', approx(0.645, abs=0.001)), + ('Ferguson2024convergence-value_delta', approx(0.024, abs=0.001)), + ('Ferguson2024eighth-value_delta', approx(0.093, abs=0.001)), + ('Ferguson2024gray_easy-value_delta', approx(0.799, abs=0.001)), + ('Ferguson2024gray_hard-value_delta', approx(0.609, abs=0.001)), + ('Ferguson2024half-value_delta', approx(0.379, abs=0.001)), + ('Ferguson2024juncture-value_delta', approx(0.191, abs=0.001)), + ('Ferguson2024lle-value_delta', approx(0.208, abs=0.001)), + ('Ferguson2024llh-value_delta', approx(0.654, abs=0.001)), + ('Ferguson2024quarter-value_delta', approx(0.223, abs=0.001)), + ('Ferguson2024round_f-value_delta', approx(0.455, abs=0.001)), + ('Ferguson2024round_v-value_delta', approx(0.212, abs=0.001)), + ('Ferguson2024tilted_line-value_delta', approx(0.445, abs=0.001)), + ]) + def test_model_raw_score(self, benchmark, expected_raw_score): + benchmark_object = load_benchmark(benchmark) + filename = f"alexnet_{benchmark}.nc" + precomputed_features = Path(__file__).parent / filename + s3.download_file_if_not_exists(precomputed_features, + bucket='brainscore-vision', remote_filepath=f'benchmarks/Ferguson2024/{filename}') + precomputed_features = BehavioralAssembly.from_files(file_path=precomputed_features) + precomputed_features = PrecomputedFeatures(precomputed_features, visual_degrees=8) + score = benchmark_object(precomputed_features) + raw_score = score.raw + # division by ceiling <= 1 should result in higher score + assert score >= raw_score + assert raw_score == expected_raw_score + + @pytest.mark.private_access + @pytest.mark.parametrize('benchmark, expected_ceiled_score', [ + ('Ferguson2024circle_line-value_delta', approx(0.162, abs=0.001)), + ('Ferguson2024color-value_delta', approx(0.719, abs=0.001)), + 
('Ferguson2024convergence-value_delta', approx(0.028, abs=0.001)), + ('Ferguson2024eighth-value_delta', approx(0.109, abs=0.001)), + ('Ferguson2024gray_easy-value_delta', approx(0.882, abs=0.001)), + ('Ferguson2024gray_hard-value_delta', approx(0.706, abs=0.001)), + ('Ferguson2024half-value_delta', approx(0.423, abs=0.001)), + ('Ferguson2024juncture-value_delta', approx(0.248, abs=0.001)), + ('Ferguson2024lle-value_delta', approx(0.250, abs=0.001)), + ('Ferguson2024llh-value_delta', approx(0.805, abs=0.001)), + ('Ferguson2024quarter-value_delta', approx(0.255, abs=0.001)), + ('Ferguson2024round_f-value_delta', approx(0.520, abs=0.001)), + ('Ferguson2024round_v-value_delta', approx(0.249, abs=0.001)), + ('Ferguson2024tilted_line-value_delta', approx(0.489, abs=0.001)), + ]) + def test_model_ceiled_score(self, benchmark, expected_ceiled_score): + benchmark_object = load_benchmark(benchmark) + filename = f"alexnet_{benchmark}.nc" + precomputed_features = Path(__file__).parent / filename + s3.download_file_if_not_exists(precomputed_features, + bucket='brainscore-vision', remote_filepath=f'benchmarks/Ferguson2024/{filename}') + precomputed_features = BehavioralAssembly.from_files(file_path=precomputed_features) + precomputed_features = PrecomputedFeatures(precomputed_features, visual_degrees=8) + score = benchmark_object(precomputed_features) + assert score == expected_ceiled_score + diff --git a/brainscore_vision/data/ferguson2024/__init__.py b/brainscore_vision/data/ferguson2024/__init__.py index 1838e77d6..1094d4314 100644 --- a/brainscore_vision/data/ferguson2024/__init__.py +++ b/brainscore_vision/data/ferguson2024/__init__.py @@ -398,4 +398,4 @@ csv_sha1="098eb5999e9c4b723abc35ade862d2dc45899230", zip_sha1="e92533d8aded07ed90ef25650d0cf07c3a458be7", csv_version_id="l.8gS70OruIDfDU9Oj.DAWw6BQNB.LKc", - zip_version_id="cAv1IPQkKX8Jey1gFc4VCwItECIiSlLV") \ No newline at end of file + zip_version_id="cAv1IPQkKX8Jey1gFc4VCwItECIiSlLV") diff --git a/brainscore_vision/data/ferguson2024/requirements.txt b/brainscore_vision/data/ferguson2024/requirements.txt new file mode 100644 index 000000000..2748df2f9 --- /dev/null +++ b/brainscore_vision/data/ferguson2024/requirements.txt @@ -0,0 +1,2 @@ +pathlib +pandas \ No newline at end of file diff --git a/brainscore_vision/metrics/value_delta/metric.py b/brainscore_vision/metrics/value_delta/metric.py index 5a67d241b..8572bb17c 100644 --- a/brainscore_vision/metrics/value_delta/metric.py +++ b/brainscore_vision/metrics/value_delta/metric.py @@ -5,6 +5,7 @@ ''' This metric takes in two floats and gives a score between 0 and 1 based on how far apart the numbers are, using a sigmoid function. 
+ ''' diff --git a/brainscore_vision/metrics/value_delta/requirements.txt b/brainscore_vision/metrics/value_delta/requirements.txt new file mode 100644 index 000000000..296d65452 --- /dev/null +++ b/brainscore_vision/metrics/value_delta/requirements.txt @@ -0,0 +1 @@ +numpy \ No newline at end of file diff --git a/brainscore_vision/model_helpers/brain_transformation/behavior.py b/brainscore_vision/model_helpers/brain_transformation/behavior.py index 97cc62dcd..49b85d83d 100644 --- a/brainscore_vision/model_helpers/brain_transformation/behavior.py +++ b/brainscore_vision/model_helpers/brain_transformation/behavior.py @@ -251,7 +251,6 @@ def order_preserving_unique(array): _, indices = np.unique(array, return_index=True) return array[np.sort(indices)] - class OddOneOut(BrainModel): def __init__(self, identifier: str, activations_model, layer: Union[str, List[str]]): """ From 0a98633cfd9eaa98db488ac844340ac38537d5d8 Mon Sep 17 00:00:00 2001 From: Andrea Costantino <59078281+costantinoai@users.noreply.github.com> Date: Wed, 10 Jul 2024 10:54:16 +0200 Subject: [PATCH 58/68] Add maniquet2024 benchmark (competition) (#1019) * maniquet2024 benchmarks * maniquet2024 data * maniquet2024 updated metrics (ceiling calculation) * Update brainscore_vision/metrics/maniquet2024_metrics/metric.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/benchmarks/benchmarks/maniquet2024/benchmark.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/benchmarks/benchmarks/maniquet2024/benchmark.py Co-authored-by: Martin Schrimpf * Update brainscore_vision/metrics/maniquet2024_metrics/metric.py Co-authored-by: Martin Schrimpf * fix redundant `benchmarks` directory --------- Co-authored-by: Martin Schrimpf Co-authored-by: Martin Schrimpf --- .../benchmarks/maniquet2024/__init__.py | 6 + .../benchmarks/maniquet2024/benchmark.py | 199 ++++++++ .../benchmarks/maniquet2024/test.py | 17 + .../data/maniquet2024/__init__.py | 57 +++ .../data/maniquet2024/data_packaging.py | 151 ++++++ brainscore_vision/data/maniquet2024/test.py | 16 + .../metrics/maniquet2024_metrics/__init__.py | 19 + .../metrics/maniquet2024_metrics/metric.py | 429 ++++++++++++++++++ .../metrics/maniquet2024_metrics/test.py | 8 + 9 files changed, 902 insertions(+) create mode 100644 brainscore_vision/benchmarks/maniquet2024/__init__.py create mode 100644 brainscore_vision/benchmarks/maniquet2024/benchmark.py create mode 100644 brainscore_vision/benchmarks/maniquet2024/test.py create mode 100644 brainscore_vision/data/maniquet2024/__init__.py create mode 100644 brainscore_vision/data/maniquet2024/data_packaging.py create mode 100644 brainscore_vision/data/maniquet2024/test.py create mode 100644 brainscore_vision/metrics/maniquet2024_metrics/__init__.py create mode 100644 brainscore_vision/metrics/maniquet2024_metrics/metric.py create mode 100644 brainscore_vision/metrics/maniquet2024_metrics/test.py diff --git a/brainscore_vision/benchmarks/maniquet2024/__init__.py b/brainscore_vision/benchmarks/maniquet2024/__init__.py new file mode 100644 index 000000000..acc15b632 --- /dev/null +++ b/brainscore_vision/benchmarks/maniquet2024/__init__.py @@ -0,0 +1,6 @@ +from brainscore_vision import benchmark_registry +from .benchmark import Maniquet2024ConfusionSimilarity, Maniquet2024TasksConsistency + +benchmark_registry['Maniquet2024-confusion_similarity'] = lambda: Maniquet2024ConfusionSimilarity() +benchmark_registry['Maniquet2024-tasks_consistency'] = lambda: Maniquet2024TasksConsistency() + diff --git 
a/brainscore_vision/benchmarks/maniquet2024/benchmark.py b/brainscore_vision/benchmarks/maniquet2024/benchmark.py new file mode 100644 index 000000000..bb9f22107 --- /dev/null +++ b/brainscore_vision/benchmarks/maniquet2024/benchmark.py @@ -0,0 +1,199 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Jun 21 23:15:17 2024 + +@author: costantino_ai +""" +from brainscore_vision.benchmarks import BenchmarkBase +from brainscore_vision.benchmark_helpers.screen import place_on_screen +from brainscore_vision.model_interface import BrainModel +from brainscore_vision import load_stimulus_set, load_metric, load_dataset +from brainscore_vision.utils import LazyLoad + + +BIBTEX = """@article {Maniquet2024.04.02.587669, + author = {Maniquet, Tim and de Beeck, Hans Op and Costantino, Andrea Ivan}, + title = {Recurrent issues with deep neural network models of visual recognition}, + elocation-id = {2024.04.02.587669}, + year = {2024}, + doi = {10.1101/2024.04.02.587669}, + publisher = {Cold Spring Harbor Laboratory}, + URL = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669}, + eprint = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669.full.pdf}, + journal = {bioRxiv} +}""" + + +class _Maniquet2024ConfusionSimilarity(BenchmarkBase): + """ + A benchmark class to measure the similarity between model-generated confusion probabilities + and human confusion data in visual tasks, specifically designed for the Maniquet2024 dataset. + + Attributes: + _metric (ConfusionSimilarity): The metric used to compare model outputs with human data. + _fitting_stimuli (StimulusSet): Stimulus set used for training or fitting the model. + _stimulus_set (StimulusSet): Stimulus set used for testing the model. + _human_assembly (DataAssembly): Human behavioral data for comparison. + _visual_degrees (int): The size of stimuli in visual degrees as presented to humans. + _number_of_trials (int): Number of trials to average over for the model predictions. + """ + + def __init__(self): + """ + Initializes the benchmark by setting up the necessary parameters. + """ + # Initialize the metric for evaluating confusion similarity + self._metric = load_metric('confusion_similarity') + + # Load training stimuli from the stimulus set registry + self._fitting_stimuli = load_stimulus_set('Maniquet2024-train') + + # Load testing stimuli from the stimulus set registry + self._stimulus_set = load_stimulus_set('Maniquet2024-test') + + # Load human behavioral data from the data registry + self._human_assembly = load_dataset('Maniquet2024') + + # Set the visual degrees to which the human data was exposed + self._visual_degrees = 8 + + # Set the number of trials to perform + self._number_of_trials = 1 + + # Call the parent class constructor to complete initialization + super(_Maniquet2024ConfusionSimilarity, self).__init__( + identifier="Maniquet2024-confusion_similarity'", + version=1, + ceiling_func=lambda: self._metric._ceiling(self._assembly), + parent="Maniquet2024", + bibtex=BIBTEX, + ) + + def __call__(self, candidate: BrainModel): + """ + Executes the benchmark by comparing the candidate model's confusion probabilities against human data. + + Args: + candidate (BrainModel): The model being evaluated. + + Returns: + float: The similarity score between the model and human data. 
+ """ + # Start the model on the task of predicting confusion probabilities + fitting_stimuli = place_on_screen(self._fitting_stimuli, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees) + candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli) + + # Prepare the stimulus set by placing it on a virtual screen at a scale appropriate for the model + stimulus_set = place_on_screen(self._stimulus_set, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees) + + # Model looks at the stimulus set and returns confusion probabilities + probabilities = candidate.look_at(stimulus_set, number_of_trials=self._number_of_trials) + + # Compute the confusion similarity score between model probabilities and human assembly data + raw_score = self._metric(probabilities, self._human_assembly) + # Normalize by ceiling + ceiling = self._ceiling(self._human_assembly, precomputed=True) + score = raw_score / ceiling + score.attrs["raw"] = raw_score + score.attrs["ceiling"] = ceiling + return score + + +class _Maniquet2024TasksConsistency(BenchmarkBase): + """ + A benchmarking class designed to evaluate the consistency of the human accuracy profiles across + all tasks with the model's accuracy profiles across the same tasks. + + Attributes: + _metric (TasksConsistency): The metric for evaluating task consistency between the model and human data. + _fitting_stimuli (StimulusSet): The set of stimuli used for model training or calibration. + _stimulus_set (StimulusSet): The set of stimuli used for testing the model's predictions. + _human_assembly (DataAssembly): The dataset containing human response data for comparison. + _visual_degrees (int): The visual size of the stimuli as perceived by human subjects. + _number_of_trials (int): The number of trials over which model predictions are averaged. + """ + + def __init__(self): + """ + Initializes the benchmark setup, including loading necessary datasets, defining the metric, and setting + up the parameters for the evaluation. + """ + # Metric for evaluating the consistency of task performance + self._metric = load_metric('tasks_consistency') + + # Load training stimuli from the stimulus set registry + self._fitting_stimuli = load_stimulus_set('Maniquet2024-train') + + # Load testing stimuli from the stimulus set registry + self._stimulus_set = load_stimulus_set('Maniquet2024-test') + + # Load human behavioral data from the data registry + self._human_assembly = load_dataset('Maniquet2024') + + # Set the visual context to match human study conditions + self._visual_degrees = 8 + + # Define the number of trials for model evaluation + self._number_of_trials = 1 + + # Initialize parent class with benchmark-specific metadata + super(_Maniquet2024TasksConsistency, self).__init__( + identifier="Maniquet2024-tasks_consistency", + version=1, + ceiling_func=lambda: self._metric.ceiling(self._human_assembly), + parent="Maniquet2024", + bibtex=BIBTEX, + ) + + def __call__(self, candidate: BrainModel): + """ + Executes the benchmark by comparing the candidate model's task performance probabilities + against human data, and returns a similarity score. + + Args: + candidate (BrainModel): The neural model being evaluated. + + Returns: + float: A similarity score indicating how closely the model's responses match human responses. 
+ """ + # Task the model with generating predictions based on the fitting stimuli + fitting_stimuli = place_on_screen( + self._fitting_stimuli, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees, + ) + candidate.start_task(BrainModel.Task.probabilities, fitting_stimuli) + + # Adjust the stimulus presentation to match the model's expected input scale + stimulus_set = place_on_screen( + self._stimulus_set, + target_visual_degrees=candidate.visual_degrees(), + source_visual_degrees=self._visual_degrees, + ) + + # Obtain the model's predictions as confusion probabilities + probabilities = candidate.look_at( + stimulus_set, number_of_trials=self._number_of_trials + ) + + # Evaluate the consistency of model predictions with human data + raw_score = self._metric(probabilities, self._human_assembly) + # Normalize by ceiling + ceiling = self._ceiling(self._human_assembly, precomputed=True) + score = raw_score / ceiling + score.attrs["raw"] = raw_score + score.attrs["ceiling"] = ceiling + return score + + +def Maniquet2024ConfusionSimilarity(): + return _Maniquet2024ConfusionSimilarity() + + +def Maniquet2024TasksConsistency(): + return _Maniquet2024TasksConsistency() diff --git a/brainscore_vision/benchmarks/maniquet2024/test.py b/brainscore_vision/benchmarks/maniquet2024/test.py new file mode 100644 index 000000000..c0d8012b3 --- /dev/null +++ b/brainscore_vision/benchmarks/maniquet2024/test.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Mon Jun 24 17:22:59 2024 + +@author: costantino_ai +""" + +import pytest +from brainscore_vision import load_benchmark + +@pytest.mark.parametrize('benchmark', [ + 'Maniquet2024-confusion_similarity', + 'Maniquet2024-tasks_consistency', +]) +def test_benchmark_registry(benchmark): + assert load_benchmark(benchmark) is not None diff --git a/brainscore_vision/data/maniquet2024/__init__.py b/brainscore_vision/data/maniquet2024/__init__.py new file mode 100644 index 000000000..3b1114f00 --- /dev/null +++ b/brainscore_vision/data/maniquet2024/__init__.py @@ -0,0 +1,57 @@ +from brainio.assemblies import BehavioralAssembly +from brainscore_vision import stimulus_set_registry, data_registry +from brainscore_vision.data_helpers.s3 import ( + load_assembly_from_s3, + load_stimulus_set_from_s3, +) + +BIBTEX = """@article {Maniquet2024.04.02.587669, + author = {Maniquet, Tim and de Beeck, Hans Op and Costantino, Andrea Ivan}, + title = {Recurrent issues with deep neural network models of visual recognition}, + elocation-id = {2024.04.02.587669}, + year = {2024}, + doi = {10.1101/2024.04.02.587669}, + publisher = {Cold Spring Harbor Laboratory}, + URL = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669}, + eprint = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669.full.pdf}, + journal = {bioRxiv} +}""" + +# Human Stimulus Set +stimulus_set_registry["Maniquet2024"] = lambda: load_stimulus_set_from_s3( + identifier="Maniquet2024", + bucket="brainio-brainscore", + csv_sha1="ec61e1d7776a6c3b467fee862302edac8d4a156e", + zip_sha1="bbdaf09528974c4ca3ee4cddbc91e0e03351291f", + csv_version_id="HwA7hBw0KVt6O.S_eDTXCHjOxfXlK_N3", + zip_version_id="lDUmFncDxloQp_9.S3VcpiOIPa1sCr7N", +) + +# DNN test Stimulus Set +stimulus_set_registry["Maniquet2024-test"] = lambda: load_stimulus_set_from_s3( + identifier="Maniquet2024-test", + bucket="brainio-brainscore", + csv_sha1="993089ba4aaeffbc61303acb2a5171a5fa271ea5", + zip_sha1="39f9aaf13fdd66d284bcea99f187bb0c065144e4", + 
csv_version_id="G8mwsgXbuaodl_icHRzA9_LK1LeF1mco", + zip_version_id="O05BqRf79q78oQJXcN.iPeeEwNSOF2iS", +) + +# DNN train Stimulus Set +stimulus_set_registry["Maniquet2024-train"] = lambda: load_stimulus_set_from_s3( + identifier="Maniquet2024-train", + bucket="brainio-brainscore", + csv_sha1="da965af3ae5ab6e49d46c28f682ef4b75d0a2045", + zip_sha1="6685effb52f6870175988c47892b3f9a916a0375", + csv_version_id="1y.4Een3cC_ju8lqOZcSeLTXxsoPq5Wg", + zip_version_id="WUCsCnvwUWVSLaioFsKXrxpOGdIMt8ij", +) + +# Human Data Assembly (behavioural) +data_registry["Maniquet2024"] = lambda: load_assembly_from_s3( + identifier="Maniquet2024", + version_id="ppAs1vv02btHmfmUMtLejawBuA96Iv2j", + sha1="39b8b7b29fad080ebba6df8a46ac4426261342d5", + bucket="brainio-brainscore", + cls=BehavioralAssembly, +) diff --git a/brainscore_vision/data/maniquet2024/data_packaging.py b/brainscore_vision/data/maniquet2024/data_packaging.py new file mode 100644 index 000000000..c03f4739b --- /dev/null +++ b/brainscore_vision/data/maniquet2024/data_packaging.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sat Jun 22 00:24:30 2024 + +@author: costantino_ai +""" + +import os +import logging +from pathlib import Path +import pandas as pd +from brainio.assemblies import BehavioralAssembly +from brainio.stimuli import StimulusSet +from brainio.packaging import package_data_assembly, package_stimulus_set + +# Setup logging +logging.basicConfig( + level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" +) + +# Constants +ROOT_DIRECTORY = "./maniquet2024/private" +TAG = "Maniquet2024" + + +def load_stimulus_set(stimuli_directory, tag): + """ + Load and package stimuli from the specified directory. + + Args: + stimuli_directory (str): Directory containing stimulus files. + tag (str): Tag to assign to the stimulus set. + + Returns: + StimulusSet: Packaged set of stimuli with metadata. + """ + logging.info("Loading stimuli from directory: %s", stimuli_directory) + stimuli = [] + stimulus_paths = {} + + for filepath in Path(stimuli_directory).glob("*.png"): + stimulus_id = filepath.stem + parts = filepath.stem.split("_") + exemplar_number, manipulation, manipulation_details, category = ( + parts[0], + parts[1], + parts[2], + parts[3], + ) + + stimulus_paths[stimulus_id] = filepath + stimuli.append( + { + "stimulus_id": stimulus_id, + "manipulation": manipulation, + "manipulation_details": manipulation_details, + "image_label": category, + "exemplar_number": exemplar_number, + } + ) + + stimulus_set = StimulusSet(stimuli) + stimulus_set.stimulus_paths = stimulus_paths + stimulus_set.name = tag + logging.info("Total stimuli loaded: %d", len(stimulus_set)) + return stimulus_set + + +def load_behavioral_data(data_file, tag): + """ + Load and package experimental data from a CSV file. + + Args: + data_file (str): Path to the CSV file containing experimental data. + tag (str): Tag to assign to the behavioral data assembly. + + Returns: + BehavioralAssembly: Data assembly of behavioral responses. 
+ """ + logging.info("Loading behavioral data from file: %s", data_file) + df = pd.read_csv(data_file) + assembly = BehavioralAssembly( + df["acc"], + dims=["presentation"], + coords={ + "stimulus_id": ("presentation", df["stimulus_id"].values), + "manipulation": ("presentation", df["condition"].values), + "manipulation_details": ("presentation", df["task_details"].values), + "mask": ("presentation", df["mask"].values), + "image_label": ("presentation", df["category"].values), + "prediction": ("presentation", df["prediction"].values), + "response": ("presentation", df["response"].values), + "reaction_time": ("presentation", df["rt"].values), + "subject_id": ("presentation", df["subj"].values), + "task": ("presentation", df["task_long"].values), + }, + ) + assembly.name = tag + logging.info( + "Data assembly loaded with %d presentations", len(assembly["presentation"]) + ) + return assembly + + +def main(): + """ + Main function to package stimulus set and experimental data, and upload to S3. + """ + logging.info("Starting the data packaging process.") + + # Load stimuli from directories + human_stimuli_directory = os.path.join(ROOT_DIRECTORY, "human_stimuli") + dnntest_stimuli_directory = os.path.join(ROOT_DIRECTORY, "dnn_stimuli/test") + dnntrain_stimuli_directory = os.path.join(ROOT_DIRECTORY, "dnn_stimuli/train") + + human_stimulus_set = load_stimulus_set(human_stimuli_directory, TAG) + dnntest_stimulus_set = load_stimulus_set(dnntest_stimuli_directory, f"{TAG}-test") + dnntrain_stimulus_set = load_stimulus_set(dnntrain_stimuli_directory, f"{TAG}-train") + + # Upload stimuli + human_stimulus_meta = package_stimulus_set( + None, human_stimulus_set, human_stimulus_set.name, "brainio-brainscore" + ) + dnntest_stimulus_meta = package_stimulus_set( + None, dnntest_stimulus_set, dnntest_stimulus_set.name, "brainio-brainscore" + ) + dnntrain_stimulus_meta = package_stimulus_set( + None, dnntrain_stimulus_set, dnntrain_stimulus_set.name, "brainio-brainscore" + ) + + # Load human data assembly + data_file = os.path.join(ROOT_DIRECTORY, "data/human_data_andrea.csv") + data_assembly = load_behavioral_data(data_file, TAG) + assembly_meta = package_data_assembly( + None, + data_assembly, + data_assembly.name, + human_stimulus_set.name, + "BehavioralAssembly", + "brainio-brainscore", + ) + + # print(human_stimulus_meta) + # print(dnntest_stimulus_meta) + # print(dnntrain_stimulus_meta) + # print(assembly_meta) + + +if __name__ == "__main__": + main() diff --git a/brainscore_vision/data/maniquet2024/test.py b/brainscore_vision/data/maniquet2024/test.py new file mode 100644 index 000000000..85469c40a --- /dev/null +++ b/brainscore_vision/data/maniquet2024/test.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Jun 20 17:32:25 2024 + +@author: costantino_ai +""" +import pytest +from brainscore_vision import load_dataset, load_stimulus_set + +@pytest.mark.private_access +def test_existence(): + assert load_stimulus_set('Maniquet2024') is not None + assert load_dataset('Maniquet2024') is not None + + \ No newline at end of file diff --git a/brainscore_vision/metrics/maniquet2024_metrics/__init__.py b/brainscore_vision/metrics/maniquet2024_metrics/__init__.py new file mode 100644 index 000000000..19f9e2ea8 --- /dev/null +++ b/brainscore_vision/metrics/maniquet2024_metrics/__init__.py @@ -0,0 +1,19 @@ +from brainscore_vision import metric_registry +from .metric import ConfusionSimilarity, TasksConsistency + +BIBTEX = """@article {Maniquet2024.04.02.587669, + author = 
{Maniquet, Tim and de Beeck, Hans Op and Costantino, Andrea Ivan}, + title = {Recurrent issues with deep neural network models of visual recognition}, + elocation-id = {2024.04.02.587669}, + year = {2024}, + doi = {10.1101/2024.04.02.587669}, + publisher = {Cold Spring Harbor Laboratory}, + URL = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669}, + eprint = {https://www.biorxiv.org/content/early/2024/04/10/2024.04.02.587669.full.pdf}, + journal = {bioRxiv} +}""" + +metric_registry['confusion_similarity'] = ConfusionSimilarity +metric_registry['tasks_consistency'] = TasksConsistency + + diff --git a/brainscore_vision/metrics/maniquet2024_metrics/metric.py b/brainscore_vision/metrics/maniquet2024_metrics/metric.py new file mode 100644 index 000000000..5f15f290d --- /dev/null +++ b/brainscore_vision/metrics/maniquet2024_metrics/metric.py @@ -0,0 +1,429 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sun Jun 23 00:44:19 2024 + +@author: costantino_ai +""" +import itertools +import numpy as np +from sklearn.metrics import confusion_matrix +from scipy.stats import pearsonr +from brainscore_core.metrics import Metric, Score +from brainio.assemblies import BehavioralAssembly + + +class ConfusionSimilarity(Metric): + """ + A metric to compute the similarity between model-generated confusion matrices and human confusion data. + + Methods: + _extract_subjects(assembly): Extracts and sorts unique subject identifiers from the assembly. + _rollout_matrix(matrix, remove_diagonal=True): Flattens a matrix into a vector, optionally removing diagonal elements. + _label_from_probability(probabilities): Derives predicted labels from probabilities. + _accuracy(y_true, y_pred): Calculates the accuracy of predictions. + _ceiling(assembly, precomputed=True): Computes the ceiling performance by assessing the highest correlation across subjects. + __call__(probabilities, human_assembly): Computes the correlation between model and human confusion matrices normalized by the ceiling. + """ + + def _extract_subjects(self, assembly): + """ + Extracts and sorts unique subject identifiers from the assembly. + + Args: + assembly (xarray.Dataset): The data assembly containing subject IDs. + + Returns: + list: Sorted list of unique subject IDs. + """ + return list(sorted(set(assembly["subject_id"].values))) + + def _rollout_matrix(self, matrix, remove_diagonal=True): + """ + Flattens a matrix into a vector. Optionally removes diagonal elements to ignore self-comparison. + + Args: + matrix (np.array): A square matrix. + remove_diagonal (bool): Whether to remove the diagonal elements of the matrix. + + Returns: + np.array: The flattened matrix as a vector. + """ + if remove_diagonal: + # Create a mask to remove diagonal elements from the matrix. + mask = np.eye(matrix.shape[0], dtype=bool) + return matrix[~mask].ravel() + else: + return matrix.ravel() + + def _label_from_probability(self, probabilities): + """ + Derives predicted labels from probabilities by selecting the class with the highest probability. + + Args: + probabilities (xarray.Dataset): Dataset containing class probabilities. + + Returns: + tuple: Arrays of true labels and predicted labels. + """ + # Extract the class with the highest probability for each instance. 
+ classes = probabilities.choice.values + indices = np.argmax(probabilities.values, axis=1) + y_pred = classes[indices] + y_true = probabilities.image_label.values + return y_true, y_pred + + def _accuracy(self, y_true, y_pred): + """ + Calculates the accuracy of predictions. + + Args: + y_true (np.array): True labels. + y_pred (np.array): Predicted labels. + + Returns: + float: The accuracy of the predictions. + """ + return sum(y_true == y_pred) / len(y_pred) + + def _ceiling(self, assembly, precomputed=True): + """ + Compute the noise ceiling of a confusion matrix using split-half correlations at the group level. + + Args: + assembly: (Human) Assembly with expected columns 'predicted'and 'image_label'. + precomputed (Bool): If true, use precomputed ceiling measure to save time. + + Returns: + score (float): Noise ceiling average. + """ + if precomputed: + # This is to save quite a lot of time. It was precomputed on the Maniquet2024 + # human data assembly, which includes 218 participants tested on the + # Maniquet2024 stimulus set. + return 0.53526 + + # Get labels and subjects lists + labels = list(set(assembly.image_label.values)) + subjects = self._extract_subjects(assembly) + + # Start recording correlation scores + correlation_scores = [] + + # Create confusion matrices for all participants + confmats = {sub: + confusion_matrix( + y_true=assembly.loc[assembly['subject_id'] == sub]["image_label"], + y_pred=assembly.loc[assembly['subject_id'] == sub]["prediction"], + labels=labels, + ) for sub in subjects} + + # Run 100 split-half permutations + for i in range(100): + + # Get random half of participants + half_subjects = random.sample(list(subjects), k=len(subjects) // 2) + + # Get average of confusion matrices for the two groups + part_one = np.mean([confmats[subject] for subject in half_subjects], axis=0) + part_two = np.mean([confmats[subject] for subject in subjects if subject not in half_subjects], axis=0) + + # Compute Pearson correlation between the two confusion matrices + correlation_score = pearsonr( + self._rollout_matrix(part_one), + self._rollout_matrix(part_two), + )[0] + correlation_scores.append(correlation_score) + + # Average split-half correlations as a measure of reliability + ceiling = np.mean(correlation_scores) + + return ceiling + + def __call__( + self, probabilities: BehavioralAssembly, human_assembly: BehavioralAssembly + ) -> Score: + """ + Computes the correlation between model and human confusion matrices normalized by the ceiling. + + Args: + probabilities (BehavioralAssembly): Model's predicted probabilities. + human_assembly (BehavioralAssembly): Human baseline responses. + + Returns: + Score: The normalized correlation score as a performance metric. + """ + assert sorted(set(probabilities.choice.values)) == sorted( + set(human_assembly.image_label.values) + ) + + # Extract labels from the model probabilities. + y_true, y_pred = self._label_from_probability(probabilities) + + # Calculate the model's confusion matrix. + dnn_confmat = confusion_matrix( + y_true=y_true, y_pred=y_pred, labels=probabilities.choice.values + ) + + # Calculate the human confusion matrix. + human_confmat = confusion_matrix( + y_true=human_assembly["image_label"], + y_pred=human_assembly["prediction"], + labels=probabilities.choice.values, + ) + + # Compute the Pearson correlation between the model and human confusion matrices. 
+ correlation_score = pearsonr( + self._rollout_matrix(human_confmat), self._rollout_matrix(dnn_confmat) + )[0] + score = Score(correlation_score) + + return score + + +class TasksConsistency(Metric): + """ + A metric to compute the consistency between model and human accuracy profiles across different tasks. + + Methods: + _extract_subjects(assembly): Extracts and sorts unique subject identifiers from the assembly. + _extract_tasks(assembly): Extracts and sorts unique task identifiers from the assembly. + _rollout_matrix(matrix, remove_diagonal=True): Flattens a matrix into a vector, optionally removing diagonal elements. + _label_from_probability(probabilities): Derives predicted labels from probabilities. + _accuracy(y_true, y_pred): Calculates the accuracy of predictions. + _ceiling(assembly, precomputed=True): Computes the ceiling performance by assessing the highest correlation across subjects. + _map_human_to_dnn_categories(human_task): Maps a human task name to the corresponding DNN categories of 'manipulation' and 'manipulation_details'. + __call__(probabilities, human_assembly): Computes the correlation between model and human confusion matrices normalized by the ceiling. + """ + + def _extract_subjects(self, assembly): + """ + Extracts and sorts unique subject identifiers from the assembly. + + Args: + assembly (xarray.Dataset): The data assembly containing subject IDs. + + Returns: + list: Sorted list of unique subject IDs. + """ + return list(sorted(set(assembly["subject_id"].values))) + + def _extract_tasks(self, assembly): + """ + Extracts and sorts unique task identifiers from the assembly. + + Args: + assembly (xarray.Dataset): The data assembly containing task IDs. + + Returns: + list: Sorted list of unique task IDs. + """ + return list(sorted(set(assembly["task"].values))) + + def _rollout_matrix(self, matrix, remove_diagonal=True): + """ + Flattens a matrix into a vector. Optionally removes diagonal elements to ignore self-comparison. + + Args: + matrix (np.array): A square matrix. + remove_diagonal (bool): Whether to remove the diagonal elements of the matrix. + + Returns: + np.array: The flattened matrix as a vector. + """ + if remove_diagonal: + # Create a mask to remove diagonal elements from the matrix. + mask = np.eye(matrix.shape[0], dtype=bool) + return matrix[~mask].ravel() + else: + return matrix.ravel() + + def _label_from_probability(self, probabilities): + """ + Derives predicted labels from probabilities by selecting the class with the highest probability. + + Args: + probabilities (xarray.Dataset): Dataset containing class probabilities. + + Returns: + tuple: Arrays of true labels and predicted labels. + """ + # Extract the class with the highest probability for each instance. + classes = probabilities.choice.values + indices = np.argmax(probabilities.values, axis=1) + y_pred = classes[indices] + y_true = probabilities.image_label.values + return y_true, y_pred + + def _accuracy(self, y_true, y_pred): + """ + Calculates the accuracy of predictions. + + Args: + y_true (np.array): True labels. + y_pred (np.array): Predicted labels. + + Returns: + float: The accuracy of the predictions. + """ + return sum(y_true == y_pred) / len(y_pred) + + def _ceiling(self, assembly, precomputed=True): + """ + Computes the ceiling performance by assessing the average split-half correlation across subjects. + + Args: + assembly (xarray.Dataset): The data assembly containing subject data. + precomputed (bool): Whether to use precomputed ceiling value. 
+ + Returns: + Score: The average correlation score across all subject pairs. + """ + if precomputed: + # This precomputed value is based on the Maniquet2024 human data assembly, + # which includes 218 participants tested on the Maniquet2024 stimulus set. + return 0.99810 + + # Initialize an empty list to store correlations for each iteration + iter_task_correlations = [] + + # Perform 50 iterations for split-half correlation + for i in range(50): + + # Randomly split the data assembly into two halves + n_rows = int(np.round(len(assembly) / 2)) + half = np.random.randint(0, len(assembly), size=n_rows) + part_one, part_two = assembly[half], assembly[~half] + + # Extract performance vectors for each half across all tasks + perf_vec_one = [ + float(np.mean(part_one[part_one["task"] == task])) + for task in self.human_tasks + ] + perf_vec_two = [ + float(np.mean(part_two.loc[part_two["task"] == task])) + for task in self.human_tasks + ] + + # Calculate the Pearson correlation between the performance vectors of the two halves + corr_perf = pearsonr(perf_vec_one, perf_vec_two)[0] + + # Append the correlation result to the list for this iteration + iter_task_correlations.append(corr_perf) + + return np.mean(iter_task_correlations) + + def _map_human_to_dnn_categories(self, human_task): + """ + Maps a human task name to the corresponding DNN categories of 'manipulation' and 'manipulation_details'. + + Args: + human_task (str): A task name from the human tasks list. + + Returns: + tuple: A tuple where the first element is the 'manipulation' and the second is 'manipulation_details'. + """ + # Mapping based on the provided details + manipulation_mapping = { + "clutter": "clutter", + "control": "control", + "occlusion": "occluder", + "scrambling": "phasescrambling", + } + + detail_mapping = { + "heavy": "heavy", + "light": "light", + "highpass": "highpass", + "lowpass": "lowpass", + "few_large_blobs_high": "fewlarge-high", + "few_large_blobs_low": "fewlarge-low", + "few_large_deletion_high": "fewlarge-high", + "few_large_deletion_low": "fewlarge-low", + "many_small_blobs_high": "manysmall-high", + "many_small_blobs_low": "manysmall-low", + "many_small_deletion_high": "manysmall-high", + "many_small_deletion_low": "manysmall-low", + "few_large_partial_viewing_high": "fewlarge-high", + "few_large_partial_viewing_low": "fewlarge-low", + "many_small_partial_viewing_high": "manysmall-high", + "many_small_partial_viewing_low": "manysmall-low", + } + + parts = human_task.split("_") + if "control" in parts: + # Handle control separately as it doesn't fit other patterns + return ("control", "control") + + # Determine manipulation by first relevant keyword + manipulation = next( + (manipulation_mapping[key] for key in manipulation_mapping if key in parts), + None, + ) + + # Construct a detail key from remaining parts excluding known manipulation keys + detail_parts = [part for part in parts if part not in manipulation_mapping] + detail_key = "_".join(detail_parts) + + # Find the matching manipulation detail + manipulation_detail = detail_mapping.get( + detail_key, "control" + ) # Default to control if no match found + + return (manipulation, manipulation_detail) + + def __call__( + self, probabilities: BehavioralAssembly, human_assembly: BehavioralAssembly + ) -> Score: + """ + Computes the correlation between model and human accuracy profiles across tasks, normalized by the ceiling. + + Args: + probabilities (BehavioralAssembly): Model's predicted probabilities. 
+ human_assembly (BehavioralAssembly): Human baseline responses. + + Returns: + Score: The normalized correlation score as a performance metric. + """ + assert sorted(set(probabilities.choice.values)) == sorted( + set(human_assembly.image_label.values) + ) + + # Get list of tasks + self.human_tasks = self._extract_tasks(human_assembly) + + # Store accuracies + dnn_accs = [] + human_accs = [] + + # Calculate the model's accuracy across tasks. + for human_task in self.human_tasks: + # Convert the human task into DNN lingo + manipulation, manipulation_details = self._map_human_to_dnn_categories( + human_task + ) + + # Extract labels from the model probabilities. + probabilities_filtered = probabilities[ + (probabilities["manipulation"] == manipulation) + & (probabilities["manipulation_details"] == manipulation_details) + ] + + dnn_y_true, dnn_y_pred = self._label_from_probability(probabilities_filtered) + dnn_acc = self._accuracy(dnn_y_true, dnn_y_pred) + dnn_accs.append(dnn_acc) + + # Extract labels from the human responses. + human_responses_filtered = human_assembly[ + human_assembly["task"] == human_task + ] + human_acc = self._accuracy( + human_responses_filtered["image_label"], + human_responses_filtered["prediction"], + ) + human_accs.append(human_acc) + + # Compute the Pearson correlation between the model and human accuracy profiles. + correlation_score = pearsonr(dnn_accs, human_accs)[0] + score = Score(correlation_score) + return score diff --git a/brainscore_vision/metrics/maniquet2024_metrics/test.py b/brainscore_vision/metrics/maniquet2024_metrics/test.py new file mode 100644 index 000000000..496156fc7 --- /dev/null +++ b/brainscore_vision/metrics/maniquet2024_metrics/test.py @@ -0,0 +1,8 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sun Jun 23 13:40:53 2024 + +@author: costantino_ai +""" + From b82a7a6a2432da9224dc29c6c2508f75cdfaa774 Mon Sep 17 00:00:00 2001 From: Michael Ferguson Date: Wed, 10 Jul 2024 10:22:41 -0400 Subject: [PATCH 59/68] add torchvision to requirements.txt (#1023) --- brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt b/brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt index bd7ed7583..d2f009b97 100644 --- a/brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt +++ b/brainscore_vision/models/ViT_L_32_imagenet1k/requirements.txt @@ -1,3 +1,4 @@ numpy torch +torchvision pytorch_pretrained_vit \ No newline at end of file From 3b68ac686ae30c62853d6edae0c908b274b89372 Mon Sep 17 00:00:00 2001 From: Martin Schrimpf Date: Wed, 10 Jul 2024 17:35:39 +0200 Subject: [PATCH 60/68] fix Ferguson2024 identifiers (#1024) --- brainscore_vision/benchmarks/ferguson2024/benchmark.py | 8 ++++---- brainscore_vision/benchmarks/ferguson2024/test.py | 10 ++++++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/brainscore_vision/benchmarks/ferguson2024/benchmark.py b/brainscore_vision/benchmarks/ferguson2024/benchmark.py index 512e58f9c..e14713edc 100644 --- a/brainscore_vision/benchmarks/ferguson2024/benchmark.py +++ b/brainscore_vision/benchmarks/ferguson2024/benchmark.py @@ -4,13 +4,12 @@ from tqdm import tqdm from typing import Dict import xarray as xr -from brainscore_vision import load_dataset, load_stimulus_set +from brainscore_vision import load_dataset, load_stimulus_set, load_metric from brainio.assemblies import BehavioralAssembly from brainscore_vision.benchmark_helpers.screen import 
place_on_screen import pandas as pd from brainscore_vision.benchmarks import BenchmarkBase from brainscore_vision.metrics import Score -from brainscore_vision.metrics.value_delta import ValueDelta from brainscore_vision.model_interface import BrainModel from .helpers.helpers import generate_summary_df, calculate_integral, HUMAN_INTEGRAL_ERRORS, LAPSE_RATES, \ split_dataframe, boostrap_integral @@ -44,13 +43,14 @@ class _Ferguson2024ValueDelta(BenchmarkBase): def __init__(self, experiment, precompute_ceiling=True): self._experiment = experiment self._precompute_ceiling = precompute_ceiling - self._metric = ValueDelta(scale=0.75) # 0.75 chosen after calibrating with ceiling + self._metric = load_metric('value_delta', scale=0.75) # 0.75 chosen after calibrating with ceiling self._fitting_stimuli = gather_fitting_stimuli(combine_all=False, experiment=self._experiment) self._assembly = load_dataset(f'Ferguson2024_{self._experiment}') self._visual_degrees = 8 self._number_of_trials = 3 self._ceiling = calculate_ceiling(self._precompute_ceiling, self._experiment, self._assembly, self._metric, num_loops=500) - super(_Ferguson2024ValueDelta, self).__init__(identifier="Ferguson2024", version=1, ceiling_func=self._ceiling, + super(_Ferguson2024ValueDelta, self).__init__(identifier=f"Ferguson2024{self._experiment}-value_delta", + version=1, ceiling_func=self._ceiling, parent='behavior', bibtex=BIBTEX) def __call__(self, candidate: BrainModel) -> Score: diff --git a/brainscore_vision/benchmarks/ferguson2024/test.py b/brainscore_vision/benchmarks/ferguson2024/test.py index 906324c31..49b7a4c01 100644 --- a/brainscore_vision/benchmarks/ferguson2024/test.py +++ b/brainscore_vision/benchmarks/ferguson2024/test.py @@ -2,13 +2,13 @@ import pytest from pytest import approx from brainio.assemblies import BehavioralAssembly -from brainscore_vision import benchmark_registry, load_benchmark +from brainscore_vision import load_benchmark from brainscore_vision.benchmark_helpers import PrecomputedFeatures from brainscore_vision.data_helpers import s3 class TestExist: - @pytest.mark.parametrize('benchmark', [ + @pytest.mark.parametrize("identifier", [ 'Ferguson2024circle_line-value_delta', 'Ferguson2024color-value_delta', 'Ferguson2024convergence-value_delta', @@ -24,8 +24,10 @@ class TestExist: 'Ferguson2024round_v-value_delta', 'Ferguson2024tilted_line-value_delta', ]) - def test_benchmark_registry(self, benchmark): - assert benchmark in benchmark_registry + def test_benchmark_registry(self, identifier): + benchmark = load_benchmark(identifier) + assert benchmark is not None + assert benchmark.identifier == identifier class TestBehavioral: From e9ce42af2e82cf72a88eeebf7fab97954f8fe9ac Mon Sep 17 00:00:00 2001 From: Katherine Fairchild Date: Wed, 10 Jul 2024 20:08:28 -0400 Subject: [PATCH 61/68] add r101_eBarlow_lmda_02_1 to models (#1027) Co-authored-by: AutoJenkins --- .../models/r101_eBarlow_lmda_02_1/__init__.py | 9 +++ .../models/r101_eBarlow_lmda_02_1/model.py | 65 +++++++++++++++++++ .../models/r101_eBarlow_lmda_02_1/setup.py | 25 +++++++ .../models/r101_eBarlow_lmda_02_1/test.py | 1 + 4 files changed, 100 insertions(+) create mode 100644 brainscore_vision/models/r101_eBarlow_lmda_02_1/__init__.py create mode 100644 brainscore_vision/models/r101_eBarlow_lmda_02_1/model.py create mode 100644 brainscore_vision/models/r101_eBarlow_lmda_02_1/setup.py create mode 100644 brainscore_vision/models/r101_eBarlow_lmda_02_1/test.py diff --git a/brainscore_vision/models/r101_eBarlow_lmda_02_1/__init__.py 
b/brainscore_vision/models/r101_eBarlow_lmda_02_1/__init__.py new file mode 100644 index 000000000..131feab27 --- /dev/null +++ b/brainscore_vision/models/r101_eBarlow_lmda_02_1/__init__.py @@ -0,0 +1,9 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry["r101_eBarlow_lmda_02_1"] = lambda: ModelCommitment( + identifier="r101_eBarlow_lmda_02_1", + activations_model=get_model("r101_eBarlow_lmda_02_1"), + layers=get_layers("r101_eBarlow_lmda_02_1"), +) diff --git a/brainscore_vision/models/r101_eBarlow_lmda_02_1/model.py b/brainscore_vision/models/r101_eBarlow_lmda_02_1/model.py new file mode 100644 index 000000000..bca80a2bc --- /dev/null +++ b/brainscore_vision/models/r101_eBarlow_lmda_02_1/model.py @@ -0,0 +1,65 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +import os +from urllib.request import urlretrieve +import torchvision.models +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from pathlib import Path +from brainscore_vision.model_helpers import download_weights +import torch +from collections import OrderedDict + +# This is an example implementation for submitting resnet-50 as a pytorch model + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. +# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. +from brainscore_vision.model_helpers.check_submission import check_models + + +def get_model_list(): + return ["r101_eBarlow_lmda_02_1"] + + +def get_model(name): + assert name == "r101_eBarlow_lmda_02_1" + url = "https://users.flatironinstitute.org/~tyerxa/equi_proj/training_checkpoints/resnet101/imagenet_1k/paired/lmda_0.2/Barlow_1/latest-rank0" + fh = urlretrieve(url) + state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"] + model = load_composer_classifier(state_dict) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + +def load_composer_classifier(sd): + model = torchvision.models.resnet.resnet101() + new_sd = OrderedDict() + for k, v in sd.items(): + if 'lin_cls' in k: + new_sd['fc.' + k.split('.')[-1]] = v + if ".f." 
not in k: + continue + parts = k.split(".") + idx = parts.index("f") + new_k = ".".join(parts[idx + 1 :]) + new_sd[new_k] = v + model.load_state_dict(new_sd, strict=True) + return model + +def get_layers(name): + assert name == "r101_eBarlow_lmda_02_1" + + outs = ["conv1", "layer1", "layer2", "layer3", "layer4", "avgpool", "fc"] + outs = ["layer4"] + return outs + + +def get_bibtex(model_identifier): + return """xx""" + + +if __name__ == "__main__": + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/r101_eBarlow_lmda_02_1/setup.py b/brainscore_vision/models/r101_eBarlow_lmda_02_1/setup.py new file mode 100644 index 000000000..421914cfb --- /dev/null +++ b/brainscore_vision/models/r101_eBarlow_lmda_02_1/setup.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup, find_packages + +requirements = [ "torchvision", + "torch" +] + +setup( + packages=find_packages(exclude=['tests']), + include_package_data=True, + install_requires=requirements, + license="MIT license", + zip_safe=False, + keywords='brain-score template', + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Programming Language :: Python :: 3.7', + ], + test_suite='tests', +) diff --git a/brainscore_vision/models/r101_eBarlow_lmda_02_1/test.py b/brainscore_vision/models/r101_eBarlow_lmda_02_1/test.py new file mode 100644 index 000000000..e594ba9e1 --- /dev/null +++ b/brainscore_vision/models/r101_eBarlow_lmda_02_1/test.py @@ -0,0 +1 @@ +# Left empty as part of 2023 models migration From 2212126d994cde62be7ef3989a3849b302c39a68 Mon Sep 17 00:00:00 2001 From: Katherine Fairchild Date: Wed, 10 Jul 2024 20:45:02 -0400 Subject: [PATCH 62/68] add r101_eBarlow_Vanilla_1 to models (#1030) Co-authored-by: AutoJenkins --- .../models/r101_eBarlow_Vanilla_1/__init__.py | 9 +++ .../models/r101_eBarlow_Vanilla_1/model.py | 64 +++++++++++++++++++ .../models/r101_eBarlow_Vanilla_1/setup.py | 25 ++++++++ .../models/r101_eBarlow_Vanilla_1/test.py | 1 + 4 files changed, 99 insertions(+) create mode 100644 brainscore_vision/models/r101_eBarlow_Vanilla_1/__init__.py create mode 100644 brainscore_vision/models/r101_eBarlow_Vanilla_1/model.py create mode 100644 brainscore_vision/models/r101_eBarlow_Vanilla_1/setup.py create mode 100644 brainscore_vision/models/r101_eBarlow_Vanilla_1/test.py diff --git a/brainscore_vision/models/r101_eBarlow_Vanilla_1/__init__.py b/brainscore_vision/models/r101_eBarlow_Vanilla_1/__init__.py new file mode 100644 index 000000000..1ea1f07c1 --- /dev/null +++ b/brainscore_vision/models/r101_eBarlow_Vanilla_1/__init__.py @@ -0,0 +1,9 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry["r101_eBarlow_Vanilla_1"] = lambda: ModelCommitment( + identifier="r101_eBarlow_Vanilla_1", + activations_model=get_model("r101_eBarlow_Vanilla_1"), + layers=get_layers("r101_eBarlow_Vanilla_1"), +) diff --git a/brainscore_vision/models/r101_eBarlow_Vanilla_1/model.py b/brainscore_vision/models/r101_eBarlow_Vanilla_1/model.py new file mode 100644 index 000000000..cf594e229 --- /dev/null +++ b/brainscore_vision/models/r101_eBarlow_Vanilla_1/model.py @@ -0,0 +1,64 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +import os +from urllib.request import urlretrieve +import 
torchvision.models +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images +from pathlib import Path +from brainscore_vision.model_helpers import download_weights +import torch +from collections import OrderedDict + +# This is an example implementation for submitting resnet-50 as a pytorch model + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. +# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. +from brainscore_vision.model_helpers.check_submission import check_models + + +def get_model_list(): + return ["r101_eBarlow_Vanilla_1"] + + +def get_model(name): + assert name == "r101_eBarlow_Vanilla_1" + url = "https://users.flatironinstitute.org/~tyerxa/equi_proj/training_checkpoints/resnet101/imagenet_1k/vanilla/Barlow_1/latest-rank0" + fh = urlretrieve(url) + state_dict = torch.load(fh[0], map_location=torch.device("cpu"))["state"]["model"] + model = load_composer_classifier(state_dict) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier=name, model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + +def load_composer_classifier(sd): + model = torchvision.models.resnet.resnet101() + new_sd = OrderedDict() + for k, v in sd.items(): + if 'lin_cls' in k: + new_sd['fc.' + k.split('.')[-1]] = v + if ".f." not in k: + continue + parts = k.split(".") + idx = parts.index("f") + new_k = ".".join(parts[idx + 1 :]) + new_sd[new_k] = v + model.load_state_dict(new_sd, strict=True) + return model + +def get_layers(name): + assert name == "r101_eBarlow_Vanilla_1" + + outs = ["layer4"] + return outs + + +def get_bibtex(model_identifier): + return """xx""" + + +if __name__ == "__main__": + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/r101_eBarlow_Vanilla_1/setup.py b/brainscore_vision/models/r101_eBarlow_Vanilla_1/setup.py new file mode 100644 index 000000000..421914cfb --- /dev/null +++ b/brainscore_vision/models/r101_eBarlow_Vanilla_1/setup.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from setuptools import setup, find_packages + +requirements = [ "torchvision", + "torch" +] + +setup( + packages=find_packages(exclude=['tests']), + include_package_data=True, + install_requires=requirements, + license="MIT license", + zip_safe=False, + keywords='brain-score template', + classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Programming Language :: Python :: 3.7', + ], + test_suite='tests', +) diff --git a/brainscore_vision/models/r101_eBarlow_Vanilla_1/test.py b/brainscore_vision/models/r101_eBarlow_Vanilla_1/test.py new file mode 100644 index 000000000..e594ba9e1 --- /dev/null +++ b/brainscore_vision/models/r101_eBarlow_Vanilla_1/test.py @@ -0,0 +1 @@ +# Left empty as part of 2023 models migration From 92c702681a360be9674cde2d4d6fd3af95f41a9f Mon Sep 17 00:00:00 2001 From: Martin Schrimpf Date: Thu, 11 Jul 2024 11:03:46 +0200 Subject: [PATCH 63/68] fix use of precomputed ceiling in Maniquet2024 (#1034) --- .../benchmarks/maniquet2024/benchmark.py | 10 ++++----- 
.../metrics/maniquet2024_metrics/metric.py | 21 ++++--------------- 2 files changed, 9 insertions(+), 22 deletions(-) diff --git a/brainscore_vision/benchmarks/maniquet2024/benchmark.py b/brainscore_vision/benchmarks/maniquet2024/benchmark.py index bb9f22107..4a12054b7 100644 --- a/brainscore_vision/benchmarks/maniquet2024/benchmark.py +++ b/brainscore_vision/benchmarks/maniquet2024/benchmark.py @@ -5,13 +5,13 @@ @author: costantino_ai """ +from brainscore_core import Score from brainscore_vision.benchmarks import BenchmarkBase from brainscore_vision.benchmark_helpers.screen import place_on_screen from brainscore_vision.model_interface import BrainModel from brainscore_vision import load_stimulus_set, load_metric, load_dataset from brainscore_vision.utils import LazyLoad - BIBTEX = """@article {Maniquet2024.04.02.587669, author = {Maniquet, Tim and de Beeck, Hans Op and Costantino, Andrea Ivan}, title = {Recurrent issues with deep neural network models of visual recognition}, @@ -65,7 +65,7 @@ def __init__(self): super(_Maniquet2024ConfusionSimilarity, self).__init__( identifier="Maniquet2024-confusion_similarity'", version=1, - ceiling_func=lambda: self._metric._ceiling(self._assembly), + ceiling_func=lambda: Score(0.53526), # use pre-computed from `self._metric._ceiling(self._human_assembly)` parent="Maniquet2024", bibtex=BIBTEX, ) @@ -97,7 +97,7 @@ def __call__(self, candidate: BrainModel): # Compute the confusion similarity score between model probabilities and human assembly data raw_score = self._metric(probabilities, self._human_assembly) # Normalize by ceiling - ceiling = self._ceiling(self._human_assembly, precomputed=True) + ceiling = self.ceiling score = raw_score / ceiling score.attrs["raw"] = raw_score score.attrs["ceiling"] = ceiling @@ -145,7 +145,7 @@ def __init__(self): super(_Maniquet2024TasksConsistency, self).__init__( identifier="Maniquet2024-tasks_consistency", version=1, - ceiling_func=lambda: self._metric.ceiling(self._human_assembly), + ceiling_func=lambda: Score(0.99810), # use pre-computed from `self._metric.ceiling(self._human_assembly)` parent="Maniquet2024", bibtex=BIBTEX, ) @@ -184,7 +184,7 @@ def __call__(self, candidate: BrainModel): # Evaluate the consistency of model predictions with human data raw_score = self._metric(probabilities, self._human_assembly) # Normalize by ceiling - ceiling = self._ceiling(self._human_assembly, precomputed=True) + ceiling = self.ceiling score = raw_score / ceiling score.attrs["raw"] = raw_score score.attrs["ceiling"] = ceiling diff --git a/brainscore_vision/metrics/maniquet2024_metrics/metric.py b/brainscore_vision/metrics/maniquet2024_metrics/metric.py index 5f15f290d..a64651088 100644 --- a/brainscore_vision/metrics/maniquet2024_metrics/metric.py +++ b/brainscore_vision/metrics/maniquet2024_metrics/metric.py @@ -22,7 +22,7 @@ class ConfusionSimilarity(Metric): _rollout_matrix(matrix, remove_diagonal=True): Flattens a matrix into a vector, optionally removing diagonal elements. _label_from_probability(probabilities): Derives predicted labels from probabilities. _accuracy(y_true, y_pred): Calculates the accuracy of predictions. - _ceiling(assembly, precomputed=True): Computes the ceiling performance by assessing the highest correlation across subjects. + _ceiling(assembly): Computes the ceiling performance by assessing the highest correlation across subjects. __call__(probabilities, human_assembly): Computes the correlation between model and human confusion matrices normalized by the ceiling. 
""" @@ -86,23 +86,16 @@ def _accuracy(self, y_true, y_pred): """ return sum(y_true == y_pred) / len(y_pred) - def _ceiling(self, assembly, precomputed=True): + def _ceiling(self, assembly): """ Compute the noise ceiling of a confusion matrix using split-half correlations at the group level. Args: assembly: (Human) Assembly with expected columns 'predicted'and 'image_label'. - precomputed (Bool): If true, use precomputed ceiling measure to save time. Returns: score (float): Noise ceiling average. """ - if precomputed: - # This is to save quite a lot of time. It was precomputed on the Maniquet2024 - # human data assembly, which includes 218 participants tested on the - # Maniquet2024 stimulus set. - return 0.53526 - # Get labels and subjects lists labels = list(set(assembly.image_label.values)) subjects = self._extract_subjects(assembly) @@ -191,7 +184,7 @@ class TasksConsistency(Metric): _rollout_matrix(matrix, remove_diagonal=True): Flattens a matrix into a vector, optionally removing diagonal elements. _label_from_probability(probabilities): Derives predicted labels from probabilities. _accuracy(y_true, y_pred): Calculates the accuracy of predictions. - _ceiling(assembly, precomputed=True): Computes the ceiling performance by assessing the highest correlation across subjects. + _ceiling(assembly): Computes the ceiling performance by assessing the highest correlation across subjects. _map_human_to_dnn_categories(human_task): Maps a human task name to the corresponding DNN categories of 'manipulation' and 'manipulation_details'. __call__(probabilities, human_assembly): Computes the correlation between model and human confusion matrices normalized by the ceiling. """ @@ -268,22 +261,16 @@ def _accuracy(self, y_true, y_pred): """ return sum(y_true == y_pred) / len(y_pred) - def _ceiling(self, assembly, precomputed=True): + def _ceiling(self, assembly): """ Computes the ceiling performance by assessing the average split-half correlation across subjects. Args: assembly (xarray.Dataset): The data assembly containing subject data. - precomputed (bool): Whether to use precomputed ceiling value. Returns: Score: The average correlation score across all subject pairs. """ - if precomputed: - # This precomputed value is based on the Maniquet2024 human data assembly, - # which includes 218 participants tested on the Maniquet2024 stimulus set. 
- return 0.99810 - # Initialize an empty list to store correlations for each iteration iter_task_correlations = [] From 2b65456835e4f32a88bf5f1b2fb1077c20b2437f Mon Sep 17 00:00:00 2001 From: Martin Schrimpf Date: Thu, 11 Jul 2024 15:21:37 +0200 Subject: [PATCH 64/68] add oddoneout readout; deal with no-dim coords (#1035) --- .../brain_transformation/behavior.py | 8 ++-- .../models/cornet_s/helpers/helpers.py | 38 ++++++++++++------- 2 files changed, 28 insertions(+), 18 deletions(-) diff --git a/brainscore_vision/model_helpers/brain_transformation/behavior.py b/brainscore_vision/model_helpers/brain_transformation/behavior.py index 49b85d83d..525079c84 100644 --- a/brainscore_vision/model_helpers/brain_transformation/behavior.py +++ b/brainscore_vision/model_helpers/brain_transformation/behavior.py @@ -317,10 +317,10 @@ def calculate_similarity_matrix(self, features): f"Unknown similarity_measure {self.similarity_measure} -- expected one of 'dot' or 'cosine'") similarity_matrix = DataAssembly(similarity_matrix, coords={ - **{f"{coord}_left": ('presentation_left', values) for coord, _, values in - walk_coords(features['presentation'])}, - **{f"{coord}_right": ('presentation_right', values) for coord, _, values in - walk_coords(features['presentation'])} + **{f"{coord}_left": ('presentation_left', values) for coord, dims, values in + walk_coords(features) if array_is_element(dims, 'presentation')}, + **{f"{coord}_right": ('presentation_right', values) for coord, dims, values in + walk_coords(features) if array_is_element(dims, 'presentation')} }, dims=['presentation_left', 'presentation_right']) return similarity_matrix diff --git a/brainscore_vision/models/cornet_s/helpers/helpers.py b/brainscore_vision/models/cornet_s/helpers/helpers.py index 5896135f3..a6f849560 100644 --- a/brainscore_vision/models/cornet_s/helpers/helpers.py +++ b/brainscore_vision/models/cornet_s/helpers/helpers.py @@ -1,14 +1,17 @@ +import re from collections import defaultdict from typing import Dict, Tuple -from brainscore_vision.model_helpers.brain_transformation.behavior import BehaviorArbiter, LogitsBehavior, ProbabilitiesMapping -from result_caching import store + +import numpy as np from tqdm import tqdm + +from brainio.assemblies import merge_data_arrays, NeuroidAssembly, walk_coords from brainscore_vision.model_helpers.activations.core import ActivationsExtractorHelper from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.brain_transformation.behavior import BehaviorArbiter, LogitsBehavior, \ + ProbabilitiesMapping, OddOneOut from brainscore_vision.model_interface import BrainModel -from brainio.assemblies import merge_data_arrays, NeuroidAssembly, walk_coords -import re -import numpy as np +from result_caching import store class TemporalPytorchWrapper(PytorchWrapper): @@ -67,7 +70,6 @@ def __init__(self, identifier, activations_model, layers, :param time_mapping: mapping from region -> {model_timestep -> (time_bin_start, time_bin_end)} """ self.layers = layers - self.region_assemblies = {} self.activations_model = activations_model self.time_mapping = time_mapping self.recording_layers = None @@ -79,8 +81,12 @@ def __init__(self, identifier, activations_model, layers, behavioral_readout_layer = behavioral_readout_layer or layers[-1] probabilities_behavior = ProbabilitiesMapping( identifier=identifier, activations_model=TemporalIgnore(activations_model), layer=behavioral_readout_layer) + odd_one_out = OddOneOut(identifier=identifier, 
activations_model=TemporalIgnore(activations_model), + layer=behavioral_readout_layer) self.behavior_model = BehaviorArbiter({BrainModel.Task.label: logits_behavior, - BrainModel.Task.probabilities: probabilities_behavior}) + BrainModel.Task.probabilities: probabilities_behavior, + BrainModel.Task.odd_one_out: odd_one_out, + }) self.do_behavior = False self._visual_degrees = visual_degrees @@ -101,16 +107,20 @@ def start_task(self, task: BrainModel.Task, *args, **kwargs): self.behavior_model.start_task(task, *args, **kwargs) self.do_behavior = True - def look_at(self, stimuli, number_of_trials=1): + def look_at(self, stimuli, number_of_trials: int = 1, require_variance: bool = False): if self.do_behavior: - return self.behavior_model.look_at(stimuli) + return self.behavior_model.look_at(stimuli, + number_of_trials=number_of_trials, require_variance=require_variance) else: # cache, since piecing times together is not too fast unfortunately - return self.look_at_cached(self.identifier, stimuli.identifier, stimuli) - - @store(identifier_ignore=['stimuli']) - def look_at_cached(self, model_identifier, stimuli_identifier, stimuli): - responses = self.activations_model(stimuli, layers=self.recording_layers) + return self.look_at_cached(self.identifier, stimuli.identifier, stimuli, + number_of_trials=number_of_trials, require_variance=require_variance) + + @store(identifier_ignore=['stimuli', 'number_of_trials', 'require_variance']) + def look_at_cached(self, model_identifier, stimuli_identifier, stimuli, + number_of_trials, require_variance): + responses = self.activations_model(stimuli, layers=self.recording_layers, + number_of_trials=number_of_trials, require_variance=require_variance) # map time regions = set(responses['region'].values) if len(regions) > 1: From 490346cf9023c35bc310b91cb40f20d597f6757a Mon Sep 17 00:00:00 2001 From: Martin Schrimpf Date: Thu, 11 Jul 2024 15:21:59 +0200 Subject: [PATCH 65/68] fix mobilenet layers and 1001 logits (#1036) --- .../mobilenet_v2_1_4_224_pytorch/model.py | 21 +++++++++---------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py b/brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py index 0901970df..e07f14f72 100644 --- a/brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py +++ b/brainscore_vision/models/mobilenet_v2_1_4_224_pytorch/model.py @@ -15,17 +15,20 @@ Disclaimer: This (pytorch) implementation's Brain-Score scores might not align identically with Tensorflow implementation. 
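The lines just below register a forward hook on the classifier so that the background class (index 0) is dropped from the 1001 ImageNet logits. A minimal standalone sketch of that PyTorch pattern, using a toy linear classifier in place of the actual MobileNetV2 head:

    import torch
    import torch.nn as nn

    # toy classifier with 1001 outputs, where index 0 plays the role of the background class
    classifier = nn.Linear(16, 1001)

    # a forward hook that returns a value replaces the module's output;
    # here it slices away the background logit
    classifier.register_forward_hook(lambda _module, _inputs, output: output[:, 1:])

    logits = classifier(torch.randn(2, 16))
    print(logits.shape)  # torch.Size([2, 1000])

Hooking the existing classifier keeps the checkpoint's weights untouched while exposing the conventional 1000-class output.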
- ''' -MODEL = MobileNetV2ForImageClassification.from_pretrained("Matthijs/mobilenet_v2_1.4_224") - - def get_model(name): assert name == 'mobilenet_v2_1-4_224_pytorch' + model = MobileNetV2ForImageClassification.from_pretrained("Matthijs/mobilenet_v2_1.4_224") + + # this mobilenet was trained with 1001 classes where index 0 is the background class + # (https://huggingface.co/docs/transformers/en/model_doc/mobilenet_v2) + classifier_layer = model.classifier + classifier_layer.register_forward_hook(lambda _layer, _input, logits: logits[:, 1:]) + preprocessing = functools.partial(load_preprocess_images, image_size=224) - wrapper = PytorchWrapper(identifier='mobilenet_v2_1-4_224_pytorch', model=MODEL, + wrapper = PytorchWrapper(identifier='mobilenet_v2_1-4_224_pytorch', model=model, preprocessing=preprocessing, batch_size=4) # doesn't fit into 12 GB GPU memory otherwise wrapper.image_size = 224 @@ -34,12 +37,8 @@ def get_model(name): def get_layers(name): assert name == 'mobilenet_v2_1-4_224_pytorch' - layer_names = [] - - for name, module in MODEL.named_modules(): - layer_names.append(name) - - return layer_names[-50:] + layer_names = ['mobilenet_v2.conv_stem'] + [f'mobilenet_v2.layer.{i}' for i in range(16)] + ['pooler', 'classifier'] + return layer_names def get_bibtex(model_identifier): From 43da718dc3bb092efb82e25e0eb5ee7147c40299 Mon Sep 17 00:00:00 2001 From: Sam Winebrake <85908068+samwinebrake@users.noreply.github.com> Date: Thu, 11 Jul 2024 10:52:57 -0400 Subject: [PATCH 66/68] remove unnecessary requirements (#1038) --- brainscore_vision/models/hmax/requirements.txt | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/brainscore_vision/models/hmax/requirements.txt b/brainscore_vision/models/hmax/requirements.txt index c2afe6a3a..3868fb16b 100644 --- a/brainscore_vision/models/hmax/requirements.txt +++ b/brainscore_vision/models/hmax/requirements.txt @@ -1,6 +1 @@ -torchvision -torch -numpy -scipy -logging -pillow \ No newline at end of file +pillow From def245cd3b41e7dab46d611a95f7ca21929a9176 Mon Sep 17 00:00:00 2001 From: Martin Schrimpf Date: Thu, 11 Jul 2024 17:06:17 +0200 Subject: [PATCH 67/68] speed up indexing 3k-fold (#1039) (4h to 5s for CORnet) --- .../model_helpers/brain_transformation/behavior.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/brainscore_vision/model_helpers/brain_transformation/behavior.py b/brainscore_vision/model_helpers/brain_transformation/behavior.py index 525079c84..f9de42485 100644 --- a/brainscore_vision/model_helpers/brain_transformation/behavior.py +++ b/brainscore_vision/model_helpers/brain_transformation/behavior.py @@ -326,12 +326,16 @@ def calculate_similarity_matrix(self, features): def calculate_choices(self, similarity_matrix, triplets): triplets = np.array(triplets).reshape(-1, 3) + for leftright in ['left', 'right']: + # indexing via `.sel(stimulus_id_left=..., stimulus_id_right=...)` is slow. 
+ # If ids are in order, we can directly index into the values + assert all(index == stimulus_id for index, stimulus_id in enumerate(similarity_matrix[f'stimulus_id_{leftright}'].values)) choice_predictions = [] for triplet in triplets: i, j, k = triplet - sims = [similarity_matrix.sel(stimulus_id_left=i, stimulus_id_right=j), - similarity_matrix.sel(stimulus_id_left=i, stimulus_id_right=k), - similarity_matrix.sel(stimulus_id_left=j, stimulus_id_right=k)] + sims = [similarity_matrix.values[i, j].item(), + similarity_matrix.values[i, k].item(), + similarity_matrix.values[j, k].item()] idx = triplet[2 - np.argmax(sims)] choice_predictions.append(idx) return choice_predictions From 446a5148035edd4589b1e9e4d8625cca60cec9bf Mon Sep 17 00:00:00 2001 From: Michael Ferguson Date: Thu, 11 Jul 2024 14:51:12 -0400 Subject: [PATCH 68/68] Add resnet50_tutorial to model registry (#1014) * Add resnet50_tutorial to model registry * resnet50_tutorial now inline with 2.0 model packaging * remove setup.py and add requirements.txt * add standard test and make sure model identifiers line up * Update brainscore_vision/models/resnet50_tutorial/model.py Co-authored-by: Martin Schrimpf --------- Co-authored-by: Martin Schrimpf --- .../models/resnet50_tutorial/__init__.py | 5 +++ .../models/resnet50_tutorial/model.py | 34 +++++++++++++++++++ .../models/resnet50_tutorial/requirements.txt | 2 ++ .../models/resnet50_tutorial/test.py | 8 +++++ 4 files changed, 49 insertions(+) create mode 100644 brainscore_vision/models/resnet50_tutorial/__init__.py create mode 100644 brainscore_vision/models/resnet50_tutorial/model.py create mode 100644 brainscore_vision/models/resnet50_tutorial/requirements.txt create mode 100644 brainscore_vision/models/resnet50_tutorial/test.py diff --git a/brainscore_vision/models/resnet50_tutorial/__init__.py b/brainscore_vision/models/resnet50_tutorial/__init__.py new file mode 100644 index 000000000..4210ed635 --- /dev/null +++ b/brainscore_vision/models/resnet50_tutorial/__init__.py @@ -0,0 +1,5 @@ +from brainscore_vision import model_registry +from brainscore_vision.model_helpers.brain_transformation import ModelCommitment +from .model import get_model, get_layers + +model_registry['resnet50_tutorial'] = lambda: ModelCommitment(identifier='resnet50_tutorial', activations_model=get_model('resnet50_tutorial'), layers=get_layers('resnet50_tutorial')) diff --git a/brainscore_vision/models/resnet50_tutorial/model.py b/brainscore_vision/models/resnet50_tutorial/model.py new file mode 100644 index 000000000..3103d9583 --- /dev/null +++ b/brainscore_vision/models/resnet50_tutorial/model.py @@ -0,0 +1,34 @@ +from brainscore_vision.model_helpers.check_submission import check_models +import functools +import torchvision.models +from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper +from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images + +# This is an example implementation for submitting resnet-50 as a pytorch model + +# Attention: It is important, that the wrapper identifier is unique per model! +# The results will otherwise be the same due to brain-scores internal result caching mechanism. +# Please load your pytorch model for usage in CPU. There won't be GPUs available for scoring your model. +# If the model requires a GPU, contact the brain-score team directly. 
+ + +def get_model(name): + assert name == 'resnet50_tutorial' + model = torchvision.models.resnet50(pretrained=True) + preprocessing = functools.partial(load_preprocess_images, image_size=224) + wrapper = PytorchWrapper(identifier='resnet50_tutorial', model=model, preprocessing=preprocessing) + wrapper.image_size = 224 + return wrapper + + +def get_layers(name): + assert name == 'resnet50_tutorial' + return ['conv1','layer1', 'layer2', 'layer3', 'layer4', 'fc'] + + +def get_bibtex(model_identifier): + return """""" + + +if __name__ == '__main__': + check_models.check_base_models(__name__) diff --git a/brainscore_vision/models/resnet50_tutorial/requirements.txt b/brainscore_vision/models/resnet50_tutorial/requirements.txt new file mode 100644 index 000000000..a56666d38 --- /dev/null +++ b/brainscore_vision/models/resnet50_tutorial/requirements.txt @@ -0,0 +1,2 @@ +torchvision +torch \ No newline at end of file diff --git a/brainscore_vision/models/resnet50_tutorial/test.py b/brainscore_vision/models/resnet50_tutorial/test.py new file mode 100644 index 000000000..28c13e09b --- /dev/null +++ b/brainscore_vision/models/resnet50_tutorial/test.py @@ -0,0 +1,8 @@ +import pytest +import brainscore_vision + + +@pytest.mark.travis_slow +def test_has_identifier(): + model = brainscore_vision.load_model('resnet50_tutorial') + assert model.identifier == 'resnet50_tutorial'
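Taken together, these patches register new models and fix the Ferguson2024 benchmark identifiers, so both can be loaded through the public registry. A minimal sketch of exercising one of the new identifiers end to end, assuming a working local install with access to the benchmark data (running it will trigger stimulus and assembly downloads):

    from brainscore_vision import load_model, load_benchmark

    model = load_model('resnet50_tutorial')
    benchmark = load_benchmark('Ferguson2024circle_line-value_delta')
    score = benchmark(model)
    print(score)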