diff --git a/.github/dependency_review.yml b/.github/dependency_review.yml
index 408a1c279bc39a..2a33abe533ecc8 100644
--- a/.github/dependency_review.yml
+++ b/.github/dependency_review.yml
@@ -16,3 +16,6 @@ fail-on-scopes:
   - 'unknown'
 license-check: true
 vulnerability-check: true
+allow-dependencies-licenses:
+  - 'pkg:pypi/PyGithub@2.2.0'
+  - 'pkg:pypi/psycopg2-binary'
diff --git a/.github/scripts/collect_github_metrics.py b/.github/scripts/collect_github_metrics.py
new file mode 100644
index 00000000000000..d933fa0f927987
--- /dev/null
+++ b/.github/scripts/collect_github_metrics.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python3
+
+from github import Github
+from psycopg2 import sql
+import os
+import logging
+import psycopg2
+import dateutil.parser
+
+def init_logger():
+    LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper()
+    logging.basicConfig(level=LOGLEVEL,
+                        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
+                        datefmt='%m-%d-%Y %H:%M:%S')
+
+def create_db_tables(conn, cur):
+    cur.execute('''CREATE TABLE IF NOT EXISTS github_workflow_runs_test(
+    id SERIAL,
+    run_id BIGINT PRIMARY KEY,
+    html_url TEXT,
+    name VARCHAR(255),
+    run_started_at TIMESTAMP,
+    triggering_actor_login VARCHAR(255),
+    conclusion VARCHAR(25),
+    run_number INT,
+    event VARCHAR(50),
+    run_attempt INT,
+    repository_full_name VARCHAR(255),
+    head_repository_full_name VARCHAR(255),
+    head_branch VARCHAR(255),
+    status VARCHAR(25),
+    display_title TEXT,
+    path TEXT
+    );
+    ''')
+    cur.execute('''CREATE TABLE IF NOT EXISTS github_workflow_jobs_test(
+    id SERIAL,
+    job_id BIGINT PRIMARY KEY,
+    parent_run_id BIGINT REFERENCES github_workflow_runs_test(run_id),
+    html_url TEXT,
+    name VARCHAR(255),
+    created_at TIMESTAMP,
+    started_at TIMESTAMP,
+    completed_at TIMESTAMP,
+    queued_duration_seconds INT,
+    duration_seconds INT,
+    runner_name VARCHAR(255),
+    status VARCHAR(25),
+    conclusion VARCHAR(25),
+    head_branch VARCHAR(255)
+    );
+    ''')
+    cur.execute('''CREATE TABLE IF NOT EXISTS github_workflow_steps_test(
+    id SERIAL PRIMARY KEY,
+    parent_job_id BIGINT REFERENCES github_workflow_jobs_test(job_id),
+    name VARCHAR(255),
+    conclusion VARCHAR(25),
+    number INT,
+    started_at TIMESTAMP,
+    completed_at TIMESTAMP,
+    duration_seconds INT
+    );
+    ''')
+    conn.commit()
+
+def main():
+    init_logger()
+
+    logger = logging.getLogger(__name__)
+
+    github_token = os.environ.get('GITHUB_TOKEN')
+    if not github_token:
+        raise ValueError('GITHUB_TOKEN environment variable is not set!')
+
+    run_id = os.environ.get('RUN_ID')
+    if not run_id:
+        raise ValueError('RUN_ID environment variable is not set!')
+
+    repo_name = os.environ.get('GITHUB_REPOSITORY')
+    if not repo_name:
+        raise ValueError('GITHUB_REPOSITORY environment variable is not set!')
+
+
+    # this should be specified in runner's env
+    db_username = os.environ.get('PGUSER')
+    db_password = os.environ.get('PGPASSWORD')
+    db_host = os.environ.get('PGHOST')
+    db_database = os.environ.get('PGDATABASE')
+    db_port = os.environ.get('PGPORT')
+    conn = psycopg2.connect(host=db_host,
+                            port=db_port,
+                            user=db_username,
+                            password=db_password,
+                            database=db_database)
+
+    # Create tables
+    cur = conn.cursor()
+    create_db_tables(conn, cur)
+
+    # Get the data
+    g = Github(github_token)
+    repo = g.get_repo(repo_name)
+
+    run = repo.get_workflow_run(int(run_id))
+
+    workflow_data_query = f'''INSERT INTO github_workflow_runs_test(
+    run_id, html_url, name,
+    run_started_at, triggering_actor_login, conclusion,
+    run_number, event, run_attempt, repository_full_name,
+    head_branch, display_title, path)
+ VALUES( + '{run_id}', '{run.html_url}', '{run.name}', '{run.run_started_at}', + '{run.raw_data['triggering_actor']['login']}', + '{run.conclusion}', '{run.run_number}', '{run.event}', + '{run.run_attempt}', '{run.raw_data['repository']['full_name']}', + '{run.head_branch}', '{run.display_title}', '{run.path}' + ); + ''' + + logger.debug('Workflow run query: %s', workflow_data_query) + cur.execute(workflow_data_query) + + for job in run.jobs(): + job_id = job.id + queued_duration_seconds = 0 + duration_seconds = 0 + + job_created_at_date = dateutil.parser.parse(job.raw_data['created_at']) + + queued_duration_timedelta = job.started_at - job_created_at_date + queued_duration_seconds = round(queued_duration_timedelta.total_seconds()) + + duration_timedelta = job.completed_at - job.started_at + duration_seconds = round(duration_timedelta.total_seconds()) + + job_data_query = f''' + INSERT INTO github_workflow_jobs_test( + job_id, parent_run_id, html_url, name, + created_at, started_at, completed_at, + queued_duration_seconds, duration_seconds, + runner_name, status, conclusion, head_branch) + VALUES( + '{job_id}', '{run_id}', '{job.html_url}', '{job.name}', + '{job.raw_data['created_at']}', '{job.started_at}', '{job.completed_at}', + '{queued_duration_seconds}', '{duration_seconds}', + '{job.raw_data['runner_name']}', '{job.status}', '{job.conclusion}', + '{job.raw_data['head_branch']}' + ); + ''' + logger.debug('Job query: %s', job_data_query) + cur.execute(job_data_query) + for step in job.steps: + duration_seconds_timedelta = step.completed_at - step.started_at + duration_seconds = round(duration_seconds_timedelta.total_seconds()) + + step_data_query = f''' + INSERT INTO github_workflow_steps_test( + parent_job_id, name, conclusion, + number, started_at, completed_at, + duration_seconds) + VALUES( + '{job_id}', '{step.name}','{step.conclusion}', + '{step.number}', '{step.started_at}', '{step.completed_at}', + '{duration_seconds}' + ); + ''' + logger.debug('Step query: %s', step_data_query) + cur.execute(step_data_query) + + conn.commit() + cur.close() + conn.close() + g.close() +if __name__ == "__main__": + main() diff --git a/.github/scripts/requirements.txt b/.github/scripts/requirements.txt new file mode 100644 index 00000000000000..04a2df829c0ed4 --- /dev/null +++ b/.github/scripts/requirements.txt @@ -0,0 +1 @@ +python-dateutil==2.9.0.post0 diff --git a/.github/workflows/dependency_review.yml b/.github/workflows/dependency_review.yml index 777198358c78f1..f757d4b9e5207d 100644 --- a/.github/workflows/dependency_review.yml +++ b/.github/workflows/dependency_review.yml @@ -15,3 +15,4 @@ jobs: uses: actions/dependency-review-action@v4 with: config-file: './.github/dependency_review.yml' + allow-dependencies-licenses: 'pkg:pypi/PyGithub@2.2.0,pkg:pypi/psycopg2-binary' diff --git a/.github/workflows/linux_sanitizers.yml b/.github/workflows/linux_sanitizers.yml new file mode 100644 index 00000000000000..575daf51daea26 --- /dev/null +++ b/.github/workflows/linux_sanitizers.yml @@ -0,0 +1,468 @@ +name: Linux Sanitizers (Ubuntu 20.04, Python 3.11) +on: + schedule: + # run daily at 00:00 + - cron: '0 0 * * *' + workflow_dispatch: + # pull_request: + +concurrency: + # github.ref is not unique in post-commit + group: ${{ github.event_name == 'push' && github.run_id || github.ref }}-linux-sanitizers + cancel-in-progress: true + +env: + PIP_CACHE_PATH: /mount/caches/pip/linux + PYTHON_VERSION: '3.11' + +jobs: + Build: + timeout-minutes: 500 + defaults: + run: + shell: bash + runs-on: 
aks-linux-16-cores-32gb + container: + image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 + volumes: + - /mount:/mount + strategy: + max-parallel: 3 + fail-fast: false + matrix: + include: + - SANITIZER: 'AddressAndLeak' + SANITIZER_CMAKE_OPTION: '-DENABLE_SANITIZER=ON' + - SANITIZER: 'UndefinedBehavior' + SANITIZER_CMAKE_OPTION: '-DENABLE_UB_SANITIZER=ON' +# - SANITIZER: 'Thread' # Problems with protobuf +# SANITIZER_CMAKE_OPTION: '-DENABLE_THREAD_SANITIZER=ON' + env: + DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input + CMAKE_BUILD_TYPE: 'Release' + CMAKE_GENERATOR: 'Ninja Multi-Config' + GITHUB_WORKSPACE: '/__w/openvino/openvino' + OPENVINO_REPO: /__w/openvino/openvino/openvino + OPENVINO_CONTRIB_REPO: /__w/openvino/openvino/openvino_contrib + INSTALL_DIR: /__w/openvino/openvino/openvino_install + INSTALL_TEST_DIR: /__w/openvino/openvino/tests_install + BUILD_DIR: /__w/openvino/openvino/openvino_build + LSAN_IGNORE: /__w/openvino/openvino/openvino/tests/lsan/suppressions.txt + ASAN_IGNORE: /__w/openvino/openvino/openvino/tests/asan/suppressions.supp + CXX: clang++ + CC: clang + + steps: + - name: Set apt retries + run: echo 'Acquire::Retries "10";' > /etc/apt/apt.conf.d/80-retries + + - name: Install git + run: | + apt-get update + apt-get install --assume-yes --no-install-recommends git ca-certificates + + - name: Clone OpenVINO + uses: actions/checkout@v4 + with: + path: ${{ env.OPENVINO_REPO }} + submodules: 'true' + + - name: Clone OpenVINO Contrib + uses: actions/checkout@v4 + with: + repository: 'openvinotoolkit/openvino_contrib' + path: ${{ env.OPENVINO_CONTRIB_REPO }} + submodules: 'true' + ref: 'master' + + # + # Print system info + # + + - name: System info + uses: ./openvino/.github/actions/system_info + + # + # Dependencies + # + + - name: Install build dependencies + run: | + bash ${OPENVINO_REPO}/install_build_dependencies.sh + apt --assume-yes install clang lld + + - name: Setup Python ${{ env.PYTHON_VERSION }} + uses: ./openvino/.github/actions/setup_python + with: + version: ${{ env.PYTHON_VERSION }} + pip-cache-path: ${{ env.PIP_CACHE_PATH }} + should-setup-pip-paths: 'true' + self-hosted-runner: 'true' + show-cache-info: 'true' + + - name: Install python dependencies + run: | + # For Python API: build and wheel packaging + python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt + + # For running ONNX frontend unit tests + python3 -m pip install --force-reinstall -r ${OPENVINO_REPO}/src/frontends/onnx/tests/requirements.txt + + # For running TensorFlow frontend unit tests + python3 -m pip install -r ${OPENVINO_REPO}/src/frontends/tensorflow/tests/requirements.txt + + # For running TensorFlow Lite frontend unit tests + python3 -m pip install -r ${OPENVINO_REPO}/src/frontends/tensorflow_lite/tests/requirements.txt + + # For running Paddle frontend unit tests + python3 -m pip install -r ${OPENVINO_REPO}/src/frontends/paddle/tests/requirements.txt + + # + # Build + # + + - name: CMake configure - OpenVINO + run: | + export ASAN_OPTIONS=halt_on_error=0:suppressions=${ASAN_IGNORE} + export LSAN_OPTIONS=suppressions=${LSAN_IGNORE}:NEOReadDebugKeys=1:DisableDeepBind=1 + export CC=clang + export CXX=clang++ + cmake \ + -G "${{ env.CMAKE_GENERATOR }}" \ + -DENABLE_CPPLINT=OFF \ + -DENABLE_NCC_STYLE=OFF \ + -DENABLE_TESTS=ON \ + -DENABLE_STRICT_DEPENDENCIES=OFF \ + -DENABLE_SYSTEM_TBB=ON \ + -DENABLE_SYSTEM_OPENCL=ON \ + -DCMAKE_VERBOSE_MAKEFILE=ON \ + -DCPACK_GENERATOR=TGZ \ + -DBUILD_SHARED_LIBS=ON \ + 
-DENABLE_OV_TF_FRONTEND=ON \ + -DENABLE_OV_TF_LITE_FRONTEND=ON \ + -DENABLE_OV_PADDLE_FRONTEND=ON \ + -DENABLE_OV_PYTORCH_FRONTEND=ON \ + -DENABLE_OV_ONNX_FRONTEND=ON \ + -DENABLE_ONEDNN_FOR_GPU=OFF \ + -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF \ + ${{ matrix.SANITIZER_CMAKE_OPTION }} \ + -S ${OPENVINO_REPO} \ + -B ${BUILD_DIR} + + - name: Cmake build - OpenVINO + run: | + export ASAN_OPTIONS=halt_on_error=0:suppressions=${ASAN_IGNORE} + export LSAN_OPTIONS=suppressions=${LSAN_IGNORE}:NEOReadDebugKeys=1:DisableDeepBind=1 + cmake --build ${BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} + + - name: Cmake install - OpenVINO + run: | + cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} -P ${BUILD_DIR}/cmake_install.cmake + cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_TEST_DIR} -DCOMPONENT=tests -P ${BUILD_DIR}/cmake_install.cmake + cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} -DCOMPONENT=python_wheels -P ${BUILD_DIR}/cmake_install.cmake + + - name: Remove unused files to free space + run: rm -rf ${BUILD_DIR}/* + + # + # Tests + # + + - name: Pack Artifacts + run: | + pushd ${INSTALL_DIR} + tar -czvf ${BUILD_DIR}/openvino_package.tar.gz * + popd + + pushd ${INSTALL_TEST_DIR} + tar -czvf ${BUILD_DIR}/openvino_tests.tar.gz * + popd + + # + # Upload build artifacts + # + + - name: Upload openvino package + if: ${{ always() }} + uses: actions/upload-artifact@v4 + with: + name: openvino_package_${{ matrix.SANITIZER }} + path: ${{ env.BUILD_DIR }}/openvino_package.tar.gz + if-no-files-found: 'error' + + - name: Upload openvino tests package + if: ${{ always() }} + uses: actions/upload-artifact@v4 + with: + name: openvino_tests_${{ matrix.SANITIZER }} + path: ${{ env.BUILD_DIR }}/openvino_tests.tar.gz + if-no-files-found: 'error' + + CXX_Unit_Tests: + name: C++ unit tests + if: always() + needs: Build + timeout-minutes: 100 + runs-on: 'aks-linux-16-cores-32gb' + container: + image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04' + defaults: + run: + shell: bash + strategy: + max-parallel: 3 + fail-fast: false + matrix: + include: + - SANITIZER: 'AddressAndLeak' +# - SANITIZER: 'UndefinedBehavior' # the UB library is not linked properly at the Build stage +# - SANITIZER: 'Thread' # Problems with protobuf at the Build stage + env: + DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input + INSTALL_DIR: ${{ github.workspace }}/install + INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests + CC: clang + CXX: clang++ + steps: + - name: Set apt retries + run: echo 'Acquire::Retries "10";' > /etc/apt/apt.conf.d/80-retries + + - name: Download OpenVINO package + uses: actions/download-artifact@v4 + with: + name: ${{ format('openvino_package_{0}', matrix.SANITIZER) }} + path: ${{ env.INSTALL_DIR }} + + - name: Download OpenVINO tests package + uses: actions/download-artifact@v4 + with: + name: ${{ format('openvino_tests_{0}', matrix.SANITIZER) }} + path: ${{ env.INSTALL_TEST_DIR }} + + # Needed as ${{ github.workspace }} is not working correctly when using Docker + - name: Setup Variables + continue-on-error: true + run: | + echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV" + echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV" + + echo "ASAN_OPTIONS=halt_on_error=0:suppressions=$GITHUB_WORKSPACE/openvino/tests/asan/suppressions.supp" >> "$GITHUB_ENV" + echo "LSAN_OPTIONS=suppressions=$GITHUB_WORKSPACE/openvino/tests/lsan/suppressions.txt:NEOReadDebugKeys=1:DisableDeepBind=1" >> "$GITHUB_ENV" + + - name: Extract OpenVINO packages + run: | + pushd 
$INSTALL_DIR + tar -xzf openvino_package.tar.gz -C $INSTALL_DIR + popd + pushd $INSTALL_TEST_DIR + tar -xzf openvino_tests.tar.gz -C $INSTALL_DIR + popd + + - name: Install dependencies (Linux) + run: | + $INSTALL_DIR/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -c=gpu -y + apt update && apt --assume-yes install clang lld + + - name: Fetch Sanitizer Suppression Lists + uses: actions/checkout@v4 + with: + sparse-checkout: | + tests/lsan/suppressions.txt + tests/asan/suppressions.supp + sparse-checkout-cone-mode: false + path: 'openvino' + + # + # Tests + # + + - name: OpenVINO Core Unit Tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OVCoreUT.xml + + - name: OpenVINO Inference Functional Tests + if: ${{ 'false' }} # Ticket: 134410 + run: | + source ${INSTALL_DIR}/setupvars.sh + + ${INSTALL_TEST_DIR}/ov_inference_functional_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceFunc.xml + + - name: OpenVINO Inference Unit Tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_inference_unit_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceUnit.xml + + - name: Low Precision Transformations Tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + + ${INSTALL_TEST_DIR}/ov_lp_transformations_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-LpTransformations.xml + + - name: OpenVINO Conditional compilation tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_conditional_compilation_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ConditionalCompilation.xml + + - name: IR frontend tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_ir_frontend_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-IRFrontend.xml + + - name: PaddlePaddle frontend tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/paddle_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-PaddleTests.xml + + - name: ONNX frontend tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_onnx_frontend_tests --gtest_print_time=1 \ + --gtest_filter=-*IE_GPU* \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ONNXFrontend.xml + + - name: TensorFlow Common frontend tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_tensorflow_common_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TensorFlowCommonFrontend.xml + + - name: TensorFlow frontend tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + + ${INSTALL_TEST_DIR}/ov_tensorflow_frontend_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TensorFlowFrontend.xml + + - name: TensorFlow Lite frontend tests + if: ${{ 'false' }} # Ticket: 134416 + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TensorFlowLiteFrontend.xml + + - name: Transformations func tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + + ${INSTALL_TEST_DIR}/ov_transformations_tests --gtest_print_time=1 \ + 
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-Transformations.xml + + - name: Common test utils tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_util_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-CommonUtilTests.xml + + - name: Snippets func tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_snippets_func_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-SnippetsFuncTests.xml + + - name: CPU plugin unit tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_cpu_unit_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-CPUUnitTests.xml + + - name: ov_subgraphs_dumper_tests tests + if: ${{ 'false' }} # Ticket: 134419 + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_subgraphs_dumper_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_subgraphs_dumper_tests.xml + + - name: Template OpImpl tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_op_conformance_tests --gtest_print_time=1 --device=TEMPLATE --gtest_filter=*OpImpl*\ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OpImplTests.xml + + - name: AUTO unit tests + if: ${{ 'false' }} # Ticket: 134423 + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_auto_unit_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_unit_tests.xml + + - name: AUTO func Tests + if: always() + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml + + - name: Template plugin func tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_template_func_tests --gtest_print_time=1 \ + --gtest_filter=*smoke* \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TemplateFuncTests.xml + + - name: OpenVINO C API tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_capi_test --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OpenVINOCAPITests.xml + + - name: AutoBatch unit tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_auto_batch_unit_tests --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_batch_unit_tests.xml + + - name: AutoBatch func tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_auto_batch_func_tests --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_batch_func_tests.xml + + - name: Proxy Plugin func tests + if: always() + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OVProxyTests.xml + + - name: Hetero unit tests + if: always() + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroUnitTests.xml + + - name: Hetero func tests + if: ${{ 'false' }} # Ticket: 134425 + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_hetero_func_tests --gtest_print_time=1 --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OVHeteroFuncTests.xml + + - name: Upload Test Results + uses: actions/upload-artifact@v4 + if: ${{ !cancelled() }} + with: + name: test-results-cpp + path: ${{ env.INSTALL_TEST_DIR 
}}/TEST*.xml + if-no-files-found: 'error' diff --git a/.github/workflows/send_workflows_to_opentelemetry.yml b/.github/workflows/send_workflows_to_opentelemetry.yml index 42cddd7b88d9dd..59146bda517182 100644 --- a/.github/workflows/send_workflows_to_opentelemetry.yml +++ b/.github/workflows/send_workflows_to_opentelemetry.yml @@ -1,4 +1,4 @@ -name: Send workflows to OpenTelemetry (BETA) +name: Export workflow metrics (BETA) on: workflow_run: @@ -29,14 +29,35 @@ permissions: read-all jobs: otel-export-trace: - name: OpenTelemetry Export Trace - runs-on: ubuntu-latest + name: Export finished workflow metrics + runs-on: aks-linux-2-cores-8gb steps: - - name: Export Workflow Trace + - name: Export Workflow Trace to Honeycomb uses: inception-health/otel-export-trace-action@7eabc7de1f4753f0b45051b44bb0ba46d05a21ef with: otlpEndpoint: grpc://api.honeycomb.io:443/ otlpHeaders: ${{ secrets.OTLP_HEADERS }} githubToken: ${{ secrets.GITHUB_TOKEN }} runId: ${{ github.event.workflow_run.id }} + + - name: Checkout + uses: actions/checkout@v4 + with: + sparse-checkout: '.github' + + - name: Install deps + run: | + pip3 install -r .github/scripts/requirements.txt + pip3 install PyGithub==2.2.0 psycopg2-binary==2.9.9 + + - name: Send metrics to SQL database + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + RUN_ID: ${{ github.event.workflow_run.id }} + PGHOST: ${{ secrets.METRICS_DATABASE_HOST }} + PGUSER: ${{ secrets.METRICS_DATABASE_USERNAME }} + PGPASSWORD: ${{ secrets.METRICS_DATABASE_PASSWORD }} + PGPORT: 5432 + run: | + python3 .github/scripts/collect_github_metrics.py diff --git a/cmake/developer_package/compile_flags/sanitizer.cmake b/cmake/developer_package/compile_flags/sanitizer.cmake index 908a1b844d2d34..5927aec9cc2d5b 100644 --- a/cmake/developer_package/compile_flags/sanitizer.cmake +++ b/cmake/developer_package/compile_flags/sanitizer.cmake @@ -98,16 +98,15 @@ if(DEFINED SANITIZER_COMPILER_FLAGS) # prevent unloading libraries at runtime, so sanitizer can resolve their symbols if(NOT OV_COMPILER_IS_APPLECLANG) set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -Wl,-z,nodelete") - - if(OV_COMPILER_IS_CLANG) - if(BUILD_SHARED_LIBS) - # clang does not provide rpath if -shared-libasan is used - # https://stackoverflow.com/questions/68571138/asan-dynamic-runtime-is-missing-on-ubuntu-18, https://bugs.llvm.org/show_bug.cgi?id=51271 - set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS},-rpath=$(dirname $($CXX --print-file-name libclang_rt.asan-x86_64.so))") - endif() - if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 8.0) - set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=lld") - endif() + + # clang does not provide rpath if -shared-libasan is used + # https://stackoverflow.com/questions/68571138/asan-dynamic-runtime-is-missing-on-ubuntu-18, https://bugs.llvm.org/show_bug.cgi?id=51271 + if(BUILD_SHARED_LIBS AND ENABLE_SANITIZER AND OV_COMPILER_IS_CLANG) + set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS},-rpath=$(dirname $($CXX --print-file-name libclang_rt.asan-x86_64.so))") + endif() + + if(OV_COMPILER_IS_CLANG AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 8.0) + set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=lld") endif() endif() else() diff --git a/cmake/developer_package/ncc_naming_style/openvino.style b/cmake/developer_package/ncc_naming_style/openvino.style index 6608795381e4a1..141f02b9ab5808 100644 --- a/cmake/developer_package/ncc_naming_style/openvino.style +++ b/cmake/developer_package/ncc_naming_style/openvino.style @@ -18,7 +18,7 @@ 
VariableReference: '^\w+$' EnumName: '^[A-Z][\w]+$' # excepts element_type -EnumConstantName: '^([A-Z\d_]+|undefined|dynamic|boolean|bf16|f16|f32|f64|i4|i8|i16|i32|i64|u1|u4|u8|u16|u32|u64|nf4|f8e4m3|f8e5m2|string|asymmetric|align_corners|round_prefer_floor|round_prefer_ceil|floor|ceil|simple|nearest|linear|linear_onnx|cubic|area|scales|sizes|half_pixel|tf_half_pixel_for_nn|pytorch_half_pixel|asymetric)$' +EnumConstantName: '^([A-Z\d_]+|undefined|dynamic|boolean|bf16|f16|f32|f64|i4|i8|i16|i32|i64|u1|u2|u3|u4|u6|u8|u16|u32|u64|nf4|f8e4m3|f8e5m2|string|asymmetric|align_corners|round_prefer_floor|round_prefer_ceil|floor|ceil|simple|nearest|linear|linear_onnx|cubic|area|scales|sizes|half_pixel|tf_half_pixel_for_nn|pytorch_half_pixel|asymetric)$' # TODO: align UsingDeclaration: '^.*$' TypedefName: '^.*$' diff --git a/docs/articles_en/documentation/openvino-ir-format/operation-sets/available-opsets/opset14.rst b/docs/articles_en/documentation/openvino-ir-format/operation-sets/available-opsets/opset14.rst index ffccba1adf3181..1cea2c755f1505 100644 --- a/docs/articles_en/documentation/openvino-ir-format/operation-sets/available-opsets/opset14.rst +++ b/docs/articles_en/documentation/openvino-ir-format/operation-sets/available-opsets/opset14.rst @@ -30,7 +30,7 @@ Table of Contents * :doc:`Assign <../operation-specs/infrastructure/assign-3>` * :doc:`Atan <../operation-specs/arithmetic/atan-1>` * :doc:`Atanh <../operation-specs/arithmetic/atanh-3>` -* :doc:`AvgPool <../operation-specs/pooling/avg-pool-1>` +* :doc:`AvgPool <../operation-specs/pooling/avg-pool-14>` * :doc:`BatchNormInference <../operation-specs/normalization/batch-norm-inference-5>` * :doc:`BatchToSpace <../operation-specs/movement/batch-to-space-2>` * :doc:`BinaryConvolution <../operation-specs/convolution/binary-convolution-1>` @@ -120,7 +120,7 @@ Table of Contents * :doc:`LSTMSequence <../operation-specs/sequence/lstm-sequence-1>` * :doc:`MatMul <../operation-specs/matrix/matmul-1>` * :doc:`MatrixNMS <../operation-specs/sort/matrix-non-max-suppression-8>` -* :doc:`MaxPool <../operation-specs/pooling/max-pool-8>` +* :doc:`MaxPool <../operation-specs/pooling/max-pool-14>` * :doc:`Maximum <../operation-specs/arithmetic/maximum-1>` * :doc:`Minimum <../operation-specs/arithmetic/minimum-1>` * :doc:`Mish <../operation-specs/activation/mish-4>` diff --git a/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs.rst b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs.rst index 251fb60a45cd28..07242d20b85327 100644 --- a/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs.rst +++ b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs.rst @@ -23,6 +23,7 @@ Operation Specifications Atan-1 Atanh-3 AvgPool-1 + AvgPool-14 BatchNormInference-1 BatchNormInference-5 BatchToSpace-2 @@ -127,6 +128,7 @@ Operation Specifications MatrixNms-8 MaxPool-1 MaxPool-8 + MaxPool-14 Maximum-1 Minimum-1 Mish-4 diff --git a/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/pooling/avg-pool-14.rst b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/pooling/avg-pool-14.rst new file mode 100644 index 00000000000000..080bcadf05850c --- /dev/null +++ b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/pooling/avg-pool-14.rst @@ -0,0 +1,199 @@ +.. {#openvino_docs_ops_pooling_AvgPool_14} + +AvgPool +======= + + +.. 
meta::
+   :description: Learn about AvgPool-14 - a pooling operation, which can
+                 be performed on a 3D, 4D or 5D input tensor.
+
+**Versioned name**: *AvgPool-14*
+
+**Category**: *Pooling*
+
+**Short description**: Performs the average pooling operation on input.
+
+**Detailed description**: `Reference `__. Average Pool is a pooling operation that performs down-sampling by dividing the input into pooling regions of the size specified by the *kernel* attribute and computing the average value of each region.
+
+**Attributes**: *Pooling* attributes are specified in the ``data`` node, which is a child of the layer node.
+
+* *strides*
+
+  * **Description**: *strides* is a distance (in pixels) to slide the window on the feature map over the (z, y, x) axes for 3D poolings and (y, x) axes for 2D poolings. For example, *strides* equal to "4,2,1" means sliding the window 4 pixels at a time over the depth dimension, 2 over the height dimension, and 1 over the width dimension.
+  * **Range of values**: integer values starting from 0
+  * **Type**: int[]
+  * **Required**: *yes*
+
+* *pads_begin*
+
+  * **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal to "1,2" means adding 1 pixel to the top of the input and 2 to the left of the input.
+  * **Range of values**: integer values starting from 0
+  * **Type**: int[]
+  * **Required**: *yes*
+  * **Note**: the attribute is ignored when the *auto_pad* attribute is specified.
+
+* *pads_end*
+
+  * **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal to "1,2" means adding 1 pixel to the bottom of the input and 2 to the right of the input.
+  * **Range of values**: integer values starting from 0
+  * **Type**: int[]
+  * **Required**: *yes*
+  * **Note**: the attribute is ignored when the *auto_pad* attribute is specified.
+
+* *kernel*
+
+  * **Description**: *kernel* is a size of each filter. For example, *kernel* equal to (2, 3) means that each filter has height equal to 2 and width equal to 3.
+  * **Range of values**: integer values starting from 1
+  * **Type**: int[]
+  * **Required**: *yes*
+
+* *exclude-pad*
+
+  * **Description**: *exclude-pad* is a type of pooling strategy for values in the padding area. For example, if *exclude-pad* is "true", then zero-values that came from padding are not included in the averaging calculation.
+  * **Range of values**: true or false
+  * **Type**: boolean
+  * **Required**: *yes*
+
+* *rounding_type*
+
+  * **Description**: *rounding_type* is the type of rounding to be applied. *ceil_torch* does not allow the last pooling to start in the padding area.
+  * **Range of values**:
+
+    * *floor*
+    * *ceil*
+    * *ceil_torch*
+  * **Type**: string
+  * **Default value**: *floor*
+  * **Required**: *no*
+
+* *auto_pad*
+
+  * **Description**: *auto_pad* specifies how the padding is calculated. Possible values:
+
+    * *explicit*: use explicit padding values from `pads_begin` and `pads_end`.
+    * *same_upper (same_lower)*: the input is padded to match the output size. In case of an odd padding value, an extra padding is added at the end (at the beginning).
+    * *valid*: do not use padding.
+  * **Type**: string
+  * **Default value**: *explicit*
+  * **Required**: *no*
+  * **Note**: *pads_begin* and *pads_end* attributes are ignored when *auto_pad* is specified.
+
+**Input**:
+
+* **1**: 3D, 4D or 5D input tensor. Input shape can be either ``[N, C, H]``, ``[N, C, H, W]`` or ``[N, C, H, W, D]``. 
**Required.** + +**Output**: + +* **1**: The output shape is ``[N, C, H_out]``, ``[N, C, H_out, W_out]`` or ``[N, C, H_out, W_out, D_out]``. Output shape calculation rules and examples can be found in :doc:`Pooling Operators shape inference rules `. + +**Types** + +* *T*: floating point or integer type. + +* *T_IND*: ``int64`` or ``int32``. + + +**Examples** + +.. code-block:: xml + :force: + + + + + + 1 + 3 + 32 + 32 + + + + + 1 + 3 + 32 + 32 + + + + + + + + + 1 + 3 + 32 + 32 + + + + + 1 + 3 + 32 + 32 + + + + + + + + + 1 + 3 + 32 + 32 + + + + + 1 + 3 + 10 + 10 + + + + + + + + + 1 + 3 + 32 + 32 + + + + + 1 + 3 + 15 + 15 + + + + + + + + + 1 + 3 + 32 + 32 + + + + + 1 + 3 + 14 + 14 + + + diff --git a/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/pooling/max-pool-14.rst b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/pooling/max-pool-14.rst new file mode 100644 index 00000000000000..4c52ce24ca43e3 --- /dev/null +++ b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/pooling/max-pool-14.rst @@ -0,0 +1,207 @@ +.. {#openvino_docs_ops_pooling_MaxPool_14} + +MaxPool +======= + + +.. meta:: + :description: Learn about MaxPool-14 - a pooling operation, which can + be performed on a 3D, 4D or 5D input tensor. + +**Versioned name**: *MaxPool-14* + +**Category**: *Pooling* + +**Short description**: Performs the max pooling operation on input. + +**Detailed description**: Input shape can be either 3D, 4D, or 5D. The max pooling operation is performed with respect to input shape from the third dimension to the last dimension. If paddings are used, during the pooling calculation their values are ``-inf``. The max pooling operation involves sliding a filter over each channel of a feature map and downsampling by choosing the largest value within the region covered by the filter. + +**Attributes**: *Pooling* attributes are specified in the ``data`` node, which is a child of the layer node. + +* *strides* + + * **Description**: *strides* is a distance (in pixels) to slide the window on the feature map over the (z, y, x) axes for 3D poolings and (y, x) axes for 2D poolings. For example, *strides* equal to "4,2,1" means sliding the window 4 pixels at a time over depth dimension, 2 over height dimension, and 1 over width dimension. + * **Range of values**: integer values starting from 0 + * **Type**: int[] + * **Required**: *yes* + +* *dilations* + + * **Description**: *dilations* specify the index of the next pixel to select when pooling. If not present, the dilation defaults to 1, meaning the adjacent pixel is chosen. A value of 2 indicates that one pixel is skipped and every other pixel is considered. Dilations specify one value for each spatial axis of the kernel: ``(z, y, x)`` for 3D poolings and ``(y, x)`` for 2D poolings. + * **Range of values**: integer values starting from 0 + * **Type**: int[] + * **Default value**: ``[1, 1, ...]`` + * **Required**: *no* + +* *pads_begin* + + * **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal to "1,2" means adding 1 pixel to the top of the input and 2 to the left of the input. All added padding values are equal to negative infinity. + * **Range of values**: integer values starting from 0 + * **Type**: int[] + * **Required**: *yes* + * **Note**: the attribute is ignored when *auto_pad* attribute is specified. 
+
+* *pads_end*
+
+  * **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal to "1,2" means adding 1 pixel to the bottom of the input and 2 to the right of the input. All added padding values are equal to negative infinity.
+  * **Range of values**: integer values starting from 0
+  * **Type**: int[]
+  * **Required**: *yes*
+  * **Note**: the attribute is ignored when the *auto_pad* attribute is specified.
+
+* *kernel*
+
+  * **Description**: *kernel* is a size of each filter. For example, *kernel* equal to (2, 3) means that each filter has height equal to 2 and width equal to 3.
+  * **Range of values**: integer values starting from 1
+  * **Type**: int[]
+  * **Required**: *yes*
+
+* *rounding_type*
+
+  * **Description**: *rounding_type* is the type of rounding to be used to compute output shape. *ceil_torch* does not allow the last pooling to start in the padding area.
+  * **Range of values**:
+
+    * *floor*
+    * *ceil*
+    * *ceil_torch*
+  * **Type**: string
+  * **Default value**: *floor*
+  * **Required**: *no*
+
+* *auto_pad*
+
+  * **Description**: *auto_pad* specifies how the padding is calculated. Possible values:
+
+    * *explicit*: explicit padding values from ``pads_begin`` and ``pads_end`` are used.
+    * *same_upper (same_lower)*: the input is padded to match the output size. In case of an odd padding value, an extra padding is added at the end (at the beginning).
+    * *valid*: padding is not used.
+
+  * **Type**: string
+  * **Default value**: *explicit*
+  * **Required**: *no*
+  * **Note**: *pads_begin* and *pads_end* attributes are ignored when *auto_pad* is not equal to explicit.
+
+* *index_element_type*
+
+  * **Description**: the type of the output tensor with indices
+  * **Range of values**: "i64" or "i32"
+  * **Type**: string
+  * **Default value**: "i64"
+  * **Required**: *no*
+
+* *axis*
+
+  * **Description**: indicator of the first dimension in the input shape that should be used to calculate the upper bound of allowed index output values. The upper bound is the product of dimensions starting from the one pointed by the 'axis' attribute until the end of the input shape.
+  * **Range of values**: integer number. Negative value means counting dimension from the end. The range is ``[-R, R - 1]``, where ``R`` is the rank of the input tensor.
+  * **Type**: int
+  * **Default value**: 0
+  * **Required**: *no*
+
+**Inputs**:
+
+* **1**: 3D, 4D, or 5D input tensor of type *T*. **Required.**
+
+**Outputs**:
+
+* **1**: Input shape can be either ``[N, C, H]``, ``[N, C, H, W]``, or ``[N, C, H, W, D]``. The corresponding output shape is ``[N, C, H_out]``, ``[N, C, H_out, W_out]`` or ``[N, C, H_out, W_out, D_out]``. Output tensor has the same data type as the input tensor. Output shape calculation rules and examples can be found in :doc:`Pooling Operators shape inference rules `.
+
+* **2**: Output tensor of type *T_IND* with indices of values selected by the pooling operation.
+  Shape of this output matches the first output. The type of this output can be specified using the ``index_element_type`` attribute.
+  Values are computed as indices in a tensor flattened to 1D, not considering padding. Examples for a 5D input tensor:
+
+  * When ``axis == 0``, the values are in the range ``[0, N * C * H * W * D)``.
+  * When ``axis == 2``, the values are in the range ``[0, H * W * D)``.
+
+  .. note::
+
+     The values of this output can only be calculated correctly if ``pads_value`` is set to ``-infinity``.
+
+
+**Types**
+
+* *T*: floating point or integer type. 
+
+* *T_IND*: ``int64`` or ``int32``.
+
+
+**Examples**
+
+.. code-block:: xml
+   :force:
+
+
+
+
+
+    1
+    3
+    32
+    32
+
+
+
+
+    1
+    3
+    32
+    32
+
+
+    1
+    3
+    32
+    32
+
+
+
+
+
+
+
+
+    1
+    3
+    32
+    32
+
+
+
+
+    1
+    3
+    17
+    17
+
+
+    1
+    3
+    17
+    17
+
+
+
+
+
+
+
+
+    1
+    3
+    32
+    32
+
+
+
+
+    1
+    3
+    16
+    16
+
+
+    1
+    3
+    16
+    16
+
+
+
diff --git a/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/pooling/pooling_shape_rules.rst b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/pooling/pooling_shape_rules.rst
new file mode 100644
index 00000000000000..cc5105a4f11697
--- /dev/null
+++ b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/pooling/pooling_shape_rules.rst
@@ -0,0 +1,208 @@
+.. {#openvino_docs_pooling_shape_rules}
+
+Shape calculation rules for Pooling Operators
+=============================================
+
+.. meta::
+   :description: Learn about output shape calculation rules for OpenVINO Pooling Operators.
+
+**Mathematical Formulation**
+
+Output shape calculation based on ``auto_pad`` and ``rounding_type``:
+
+* ``auto_pad = explicit`` and ``rounding_type = floor``
+      ``H_out = floor((H + pads_begin[0] + pads_end[0] - ((kernel[0] - 1) * dilations[0] + 1)) / strides[0] + 1)``
+      ``W_out = floor((W + pads_begin[1] + pads_end[1] - ((kernel[1] - 1) * dilations[1] + 1)) / strides[1] + 1)``
+      ``D_out = floor((D + pads_begin[2] + pads_end[2] - ((kernel[2] - 1) * dilations[2] + 1)) / strides[2] + 1)``
+
+* ``auto_pad = explicit`` and ``rounding_type = ceil``
+      ``H_out = ceil((H + pads_begin[0] + pads_end[0] - ((kernel[0] - 1) * dilations[0] + 1)) / strides[0] + 1)``
+      ``W_out = ceil((W + pads_begin[1] + pads_end[1] - ((kernel[1] - 1) * dilations[1] + 1)) / strides[1] + 1)``
+      ``D_out = ceil((D + pads_begin[2] + pads_end[2] - ((kernel[2] - 1) * dilations[2] + 1)) / strides[2] + 1)``
+
+* ``auto_pad = valid``
+      Please note that AvgPool does not support the ``dilations`` attribute, in which case its value should be replaced with ``1``.
+      ``H_out = ceil((H - ((kernel[0] - 1) * dilations[0] + 1) + 1) / strides[0])``
+      ``W_out = ceil((W - ((kernel[1] - 1) * dilations[1] + 1) + 1) / strides[1])``
+      ``D_out = ceil((D - ((kernel[2] - 1) * dilations[2] + 1) + 1) / strides[2])``
+
+* ``auto_pad = same_upper / same_lower``
+      ``H_out = H``
+      ``W_out = W``
+      ``D_out = D``
+
+
+If ``H + pads_begin[i] + pads_end[i] - kernel[i]`` is not divisible by ``strides[i]`` evenly, the result is rounded with respect to the ``rounding_type`` attribute.
+If ``rounding_type`` is set to ``ceil_torch``, the last pooling operation within a dimension cannot start in the padding area. If this is the case, the respective dimension is reduced by ``1``. More context can be found in the `PyTorch issue discussion `__.
+
+**Examples**
+
+1. Example 1 shows how *MaxPool* operates with 4D input using 2D kernel and ``auto_pad = explicit``.
+
+   .. code-block:: sh
+
+      input = [[[[-1, 2, 3],
+                 [4, 5, -6],
+                 [-7, 8, 9]]]]   # shape: (1, 1, 3, 3)
+      strides = [1, 1]
+      pads_begin = [1, 1]
+      pads_end = [1, 1]
+      kernel = [2, 2]
+      rounding_type = "floor"
+      auto_pad = "explicit"
+      output0 = [[[[-1, 2, 3, 3],
+                   [4, 5, 5, -6],
+                   [4, 8, 9, 9],
+                   [-7, 8, 9, 9]]]]   # shape: (1, 1, 4, 4)
+      output1 = [[[[0, 1, 2, 2],
+                   [3, 4, 4, 5],
+                   [3, 7, 8, 8],
+                   [6, 7, 8, 8]]]]   # shape: (1, 1, 4, 4)
+
+
+2. Example 2 shows how *MaxPool* operates with 3D input using 1D kernel and ``auto_pad = valid``.
+
+   .. 
code-block:: sh + + input = [[[-1, 2, 3, 5, -7, 9, 1]]] # shape: (1, 1, 7) + strides = [1] + kernel = [3] + rounding_type = "floor" + auto_pad = "valid" + output0 = [[[3, 5, 5, 9, 9]]] # shape: (1, 1, 5) + output1 = [[[2, 3, 3, 5, 5]]] # shape: (1, 1, 5) + + +3. Example 3 shows how *MaxPool* operates with 4D input using 2D kernel and ``auto_pad = same_lower``. + + .. code-block:: sh + + input = [[[[-1, 2, 3], + [4, 5, -6], + [-7, 8, 9]]]] # shape: (1, 1, 3, 3) + strides = [1, 1] + kernel = [2, 2] + rounding_type = "floor" + auto_pad = "same_lower" + output0 = [[[[-1, 2, 3], + [4, 5, 5] + [4, 8, 9]]]] # shape: (1, 1, 3, 3) + output1 = [[[[0, 1, 2], + [3, 4, 4], + [3, 7, 8]]]] # shape: (1, 1, 3, 3) + + +4. Example 4 shows how *MaxPool* operates with 4D input using 2D kernel and ``auto_pad = same_upper``. + + .. code-block:: sh + + input = [[[[-1, 2, 3], + [4, 5, -6], + [-7, 8, 9]], + [[2, -1, 5], + [6, -7, 1], + [8, 2, -3]]]] # shape: (1, 2, 3, 3) + strides = [1, 1] + kernel = [2, 2] + rounding_type = "floor" + auto_pad = "same_upper" + output0 = [[[[5, 5, 3], + [8, 9, 9] + [8, 9, 9]], + [[6, 5, 5], + [8, 2, 1], + [8, 2, -3]]]] # shape: (1, 2, 3, 3) + output1 = [[[[4, 4, 2], + [7, 8, 8], + [7, 8, 8]], + [[12, 11, 11], + [15, 16, 14], + [15, 16, 17]]]] # shape: (1, 2, 3, 3) + + +5. Example 5 shows how *MaxPool* operates with 4D input using 2D kernel and ``rounding_type = ceil_torch``. + + .. code-block:: sh + + input = [[[[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]]] # shape: (1, 1, 3, 3) + strides = [2, 2] + kernel = [2, 2] + pads_begin = [1, 1] + pads_end = [1, 1] + rounding_type = "ceil_torch" + output0 = [[[[1, 3], + [7, 9]]]] # shape: (1, 1, 2, 2) + output1 = [[[[0, 2], + [6, 8]]]] # shape: (1, 1, 2, 2) + + +6. Example 6 shows how *MaxPool* operates with 4D input using 2D kernel, ``auto_pad = valid`` and ``rounding_type = ceil``. + + .. code-block:: sh + + input = [[[[-1, 2, 3], + [4, 5, -6], + [-7, 8, 9]]]] # shape: (1, 1, 3, 3) + strides = [2, 2] + kernel = [2, 2] + rounding_type = "ceil" + auto_pad = "valid" + output0 = [[[[5, 3], + [8, 9]]]] # shape: (1, 1, 2, 2) + output1 = [[[[4, 2], + [7, 8]]]] # shape: (1, 1, 2, 2) + + +7. Example 7 shows how *MaxPool* operates on 4D input using dilated 2D kernel, ``auto_pad = explicit`` and ``rounding_type = floor``. + + .. code-block:: sh + + input = [[[[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]]] # shape: (1, 1, 3, 3) + strides = [1, 1] + kernel = [2, 2] + dilations = [2, 2] + rounding_type = "floor" + auto_pad = "explicit" + pads_begin = [1, 1] + pads_end = [1, 1] + output0 = [[[[5, 6, 5], + [8, 9, 8], + [5, 6, 5]]]] # shape: (1, 1, 3, 3) + output1 = [[[[4, 5, 4], + [7, 8, 7], + [4, 5, 4]]]] # shape: (1, 1, 3, 3) + + +8. Example 8 shows how *MaxPool* operates on 4D input using 2D kernel, with non-default ``axis`` value. + +Input shape: (1, 2, 3, 3) +Output shape: (1, 2, 2, 2) + + .. 
code-block:: sh + + input = [[[[1, 2, 3], + [4, 5, 6], + [7, 8, 9]], + [[10, 11, 12], + [13, 14, 15], + [16, 17, 18]]]] # shape: (1, 2, 3, 3) + strides = [1, 1] + kernel = [2, 2] + dilations = [1, 1] + rounding_type = "floor" + auto_pad = "explicit" + pads_begin = [0, 0] + pads_end = [0, 0] + axis = 2 + output0 = [[[[5, 6], + [8, 9]], + [[14, 15], + [17, 18]]]] # shape: (1, 2, 2, 2) + output1 = [[[[4, 5], + [7, 8]], + [[4, 5], + [7, 8]]]] # shape: (1, 2, 2, 2) diff --git a/docs/articles_en/openvino-workflow/model-optimization-guide/quantizing-models-post-training/basic-quantization-flow.rst b/docs/articles_en/openvino-workflow/model-optimization-guide/quantizing-models-post-training/basic-quantization-flow.rst index 6865869071f10b..de0b0f96cc0e1d 100644 --- a/docs/articles_en/openvino-workflow/model-optimization-guide/quantizing-models-post-training/basic-quantization-flow.rst +++ b/docs/articles_en/openvino-workflow/model-optimization-guide/quantizing-models-post-training/basic-quantization-flow.rst @@ -193,6 +193,15 @@ Tune quantization parameters regex = '.*layer_.*' nncf.quantize(model, dataset, ignored_scope=nncf.IgnoredScope(patterns=regex)) + * Exclude by subgraphs: + + .. code-block:: sh + + subgraph = nncf.Subgraph(inputs=['layer_1', 'layer_2'], outputs=['layer_3']) + nncf.quantize(model, dataset, ignored_scope=nncf.IgnoredScope(subgraphs=[subgraph])) + + In this case, all nodes along all simple paths in the graph from input to output nodes will be excluded from the quantization process. + * ``target_device`` - defines the target device, the specificity of which will be taken into account during optimization. The following values are supported: ``ANY`` (default), ``CPU``, ``CPU_SPR``, ``GPU``, and ``NPU``. .. code-block:: sh diff --git a/docs/nbdoc/consts.py b/docs/nbdoc/consts.py index 82294d41680f5d..3993b7835c196c 100644 --- a/docs/nbdoc/consts.py +++ b/docs/nbdoc/consts.py @@ -6,7 +6,7 @@ repo_owner = "openvinotoolkit" repo_name = "openvino_notebooks" repo_branch = "tree/main" -artifacts_link = "http://repository.toolbox.iotg.sclab.intel.com/projects/ov-notebook/0.1.0-latest/20240209220807/dist/rst_files/" +artifacts_link = "http://repository.toolbox.iotg.sclab.intel.com/projects/ov-notebook/0.1.0-latest/20240312220809/dist/rst_files/" blacklisted_extensions = ['.xml', '.bin'] notebooks_repo = "https://github.com/openvinotoolkit/openvino_notebooks/blob/main/" notebooks_binder = "https://mybinder.org/v2/gh/openvinotoolkit/openvino_notebooks/HEAD?filepath=" diff --git a/docs/notebooks/001-hello-world-with-output.rst b/docs/notebooks/001-hello-world-with-output.rst index 2806df1eaaad05..9104721f72f467 100644 --- a/docs/notebooks/001-hello-world-with-output.rst +++ b/docs/notebooks/001-hello-world-with-output.rst @@ -43,19 +43,19 @@ Imports .. code:: ipython3 from pathlib import Path - + import cv2 import matplotlib.pyplot as plt import numpy as np import openvino as ov - + # Fetch `notebook_utils` module import urllib.request urllib.request.urlretrieve( url='https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/main/notebooks/utils/notebook_utils.py', filename='notebook_utils.py' ) - + from notebook_utils import download_file Download the Model and data samples @@ -66,15 +66,15 @@ Download the Model and data samples .. 
code:: ipython3 base_artifacts_dir = Path('./artifacts').expanduser() - + model_name = "v3-small_224_1.0_float" model_xml_name = f'{model_name}.xml' model_bin_name = f'{model_name}.bin' - + model_xml_path = base_artifacts_dir / model_xml_name - + base_url = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/models/mobelinet-v3-tf/FP32/' - + if not model_xml_path.exists(): download_file(base_url + model_xml_name, model_xml_name, base_artifacts_dir) download_file(base_url + model_bin_name, model_bin_name, base_artifacts_dir) @@ -104,7 +104,7 @@ select device from dropdown list for running inference using OpenVINO .. code:: ipython3 import ipywidgets as widgets - + core = ov.Core() device = widgets.Dropdown( options=core.available_devices + ["AUTO"], @@ -112,7 +112,7 @@ select device from dropdown list for running inference using OpenVINO description='Device:', disabled=False, ) - + device @@ -134,7 +134,7 @@ Load the Model core = ov.Core() model = core.read_model(model=model_xml_path) compiled_model = core.compile_model(model=model, device_name=device.value) - + output_layer = compiled_model.output(0) Load an Image @@ -149,13 +149,13 @@ Load an Image "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/coco.jpg", directory="data" ) - + # The MobileNet model expects images in RGB format. image = cv2.cvtColor(cv2.imread(filename=str(image_filename)), code=cv2.COLOR_BGR2RGB) - + # Resize to MobileNet image shape. input_image = cv2.resize(src=image, dsize=(224, 224)) - + # Reshape to model input shape. input_image = np.expand_dims(input_image, 0) plt.imshow(image); @@ -187,7 +187,7 @@ Do Inference "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/datasets/imagenet/imagenet_2012.txt", directory="data" ) - + imagenet_classes = imagenet_filename.read_text().splitlines() @@ -202,7 +202,7 @@ Do Inference # The model description states that for this model, class 0 is a background. # Therefore, a background must be added at the beginning of imagenet_classes. imagenet_classes = ['background'] + imagenet_classes - + imagenet_classes[result_index] diff --git a/docs/notebooks/001-hello-world-with-output_files/001-hello-world-with-output_11_1.png b/docs/notebooks/001-hello-world-with-output_files/001-hello-world-with-output_11_1.png index 3f302494abcb62..a142093f6e675c 100644 --- a/docs/notebooks/001-hello-world-with-output_files/001-hello-world-with-output_11_1.png +++ b/docs/notebooks/001-hello-world-with-output_files/001-hello-world-with-output_11_1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2dd4338c6c163e7693885ce544e8c9cd2aecedf3b136fa295e22877f37b5634c +oid sha256:5712bd24e962ae0e0267607554ebe1f2869c223b108876ce10e5d20fe6285126 size 387941 diff --git a/docs/notebooks/002-openvino-api-with-output.rst b/docs/notebooks/002-openvino-api-with-output.rst index 26599ac60b8b31..e88ae2e7add149 100644 --- a/docs/notebooks/002-openvino-api-with-output.rst +++ b/docs/notebooks/002-openvino-api-with-output.rst @@ -41,16 +41,16 @@ Table of contents: .. code:: ipython3 # Required imports. Please execute this cell first. 
- %pip install -q "openvino>=2023.1.0" - %pip install requests tqdm ipywidgets - + %pip install -q "openvino>=2023.1.0" + %pip install -q requests tqdm ipywidgets + # Fetch `notebook_utils` module import urllib.request urllib.request.urlretrieve( url='https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/main/notebooks/utils/notebook_utils.py', filename='notebook_utils.py' ) - + from notebook_utils import download_file @@ -59,47 +59,6 @@ Table of contents: Note: you may need to restart the kernel to use updated packages. -.. parsed-literal:: - - Requirement already satisfied: requests in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (2.31.0) - Requirement already satisfied: tqdm in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (4.66.1) - Requirement already satisfied: ipywidgets in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (8.1.2) - Requirement already satisfied: charset-normalizer<4,>=2 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests) (3.3.2) - Requirement already satisfied: idna<4,>=2.5 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests) (3.6) - Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests) (2.2.0) - Requirement already satisfied: certifi>=2017.4.17 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests) (2024.2.2) - Requirement already satisfied: comm>=0.1.3 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets) (0.2.1) - Requirement already satisfied: ipython>=6.1.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets) (8.12.3) - Requirement already satisfied: traitlets>=4.3.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets) (5.14.1) - Requirement already satisfied: widgetsnbextension~=4.0.10 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets) (4.0.10) - Requirement already satisfied: jupyterlab-widgets~=3.0.10 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets) (3.0.10) - - -.. 
parsed-literal:: - - Requirement already satisfied: backcall in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (0.2.0) - Requirement already satisfied: decorator in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (5.1.1) - Requirement already satisfied: jedi>=0.16 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (0.19.1) - Requirement already satisfied: matplotlib-inline in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (0.1.6) - Requirement already satisfied: pickleshare in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (0.7.5) - Requirement already satisfied: prompt-toolkit!=3.0.37,<3.1.0,>=3.0.30 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (3.0.43) - Requirement already satisfied: pygments>=2.4.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (2.17.2) - Requirement already satisfied: stack-data in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (0.6.3) - Requirement already satisfied: typing-extensions in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (4.9.0) - Requirement already satisfied: pexpect>4.3 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (4.9.0) - - -.. 
parsed-literal:: - - Requirement already satisfied: parso<0.9.0,>=0.8.3 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from jedi>=0.16->ipython>=6.1.0->ipywidgets) (0.8.3) - Requirement already satisfied: ptyprocess>=0.5 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from pexpect>4.3->ipython>=6.1.0->ipywidgets) (0.7.0) - Requirement already satisfied: wcwidth in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from prompt-toolkit!=3.0.37,<3.1.0,>=3.0.30->ipython>=6.1.0->ipywidgets) (0.2.13) - Requirement already satisfied: executing>=1.2.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (2.0.1) - Requirement already satisfied: asttokens>=2.1.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (2.4.1) - Requirement already satisfied: pure-eval in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (0.2.2) - Requirement already satisfied: six>=1.12.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from asttokens>=2.1.0->stack-data->ipython>=6.1.0->ipywidgets) (1.16.0) - - .. parsed-literal:: Note: you may need to restart the kernel to use updated packages. @@ -115,7 +74,7 @@ Initialize OpenVINO Runtime with ``ov.Core()`` .. code:: ipython3 import openvino as ov - + core = ov.Core() OpenVINO Runtime can load a network on a device. A device in this @@ -124,15 +83,10 @@ context means a CPU, an Intel GPU, a Neural Compute Stick 2, etc. The system. The “FULL_DEVICE_NAME” option to ``core.get_property()`` shows the name of the device. -In this notebook, the CPU device is used. To use an integrated GPU, use -``device_name="GPU"`` instead. Be aware that loading a network on GPU -will be slower than loading a network on CPU, but inference will likely -be faster. - .. code:: ipython3 devices = core.available_devices - + for device in devices: device_name = core.get_property(device, "FULL_DEVICE_NAME") print(f"{device}: {device_name}") @@ -143,6 +97,34 @@ be faster. CPU: Intel(R) Core(TM) i9-10920X CPU @ 3.50GHz +Select device for inference +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can specify which device from available devices will be used for +inference using this widget + +.. code:: ipython3 + + import ipywidgets as widgets + + device = widgets.Dropdown( + options=core.available_devices, + value=core.available_devices[0], + description='Device:', + disabled=False, + ) + + device + + + + +.. parsed-literal:: + + Dropdown(description='Device:', options=('CPU',), value='CPU') + + + Loading a Model --------------- @@ -153,7 +135,7 @@ After initializing OpenVINO Runtime, first read the model file with ``compile_model()`` method. `OpenVINO™ supports several model -formats `__ +formats `__ and enables developers to convert them to its own OpenVINO IR format using a tool dedicated to this task. @@ -193,7 +175,7 @@ notebooks. 
ir_model_url = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/models/002-example-models/' ir_model_name_xml = 'classification.xml' ir_model_name_bin = 'classification.bin' - + download_file(ir_model_url + ir_model_name_xml, filename=ir_model_name_xml, directory='model') download_file(ir_model_url + ir_model_name_bin, filename=ir_model_name_bin, directory='model') @@ -214,19 +196,19 @@ notebooks. .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.bin') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.bin') .. code:: ipython3 import openvino as ov - + core = ov.Core() classification_model_xml = "model/classification.xml" - + model = core.read_model(model=classification_model_xml) - compiled_model = core.compile_model(model=model, device_name="CPU") + compiled_model = core.compile_model(model=model, device_name=device.value) ONNX Model ~~~~~~~~~~ @@ -249,7 +231,7 @@ points to the filename of an ONNX model. onnx_model_url = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/models/002-example-models/segmentation.onnx' onnx_model_name = 'segmentation.onnx' - + download_file(onnx_model_url, filename=onnx_model_name, directory='model') @@ -263,19 +245,19 @@ points to the filename of an ONNX model. .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/segmentation.onnx') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/segmentation.onnx') .. code:: ipython3 import openvino as ov - + core = ov.Core() onnx_model_path = "model/segmentation.onnx" - + model_onnx = core.read_model(model=onnx_model_path) - compiled_model_onnx = core.compile_model(model=model_onnx, device_name="CPU") + compiled_model_onnx = core.compile_model(model=model_onnx, device_name=device.value) The ONNX model can be exported to OpenVINO IR with ``save_model()``: @@ -298,7 +280,7 @@ without any conversion step. Pass the filename with extension to paddle_model_url = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/models/002-example-models/' paddle_model_name = 'inference.pdmodel' paddle_params_name = 'inference.pdiparams' - + download_file(paddle_model_url + paddle_model_name, filename=paddle_model_name, directory='model') download_file(paddle_model_url + paddle_params_name, filename=paddle_params_name, directory='model') @@ -319,19 +301,19 @@ without any conversion step. Pass the filename with extension to .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/inference.pdiparams') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/inference.pdiparams') .. code:: ipython3 import openvino as ov - + core = ov.Core() paddle_model_path = 'model/inference.pdmodel' - + model_paddle = core.read_model(model=paddle_model_path) - compiled_model_paddle = core.compile_model(model=model_paddle, device_name="CPU") + compiled_model_paddle = core.compile_model(model=model_paddle, device_name=device.value) .. 
code:: ipython3 @@ -349,7 +331,7 @@ TensorFlow models saved in frozen graph format can also be passed to pb_model_url = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/models/002-example-models/classification.pb' pb_model_name = 'classification.pb' - + download_file(pb_model_url, filename=pb_model_name, directory='model') @@ -363,19 +345,19 @@ TensorFlow models saved in frozen graph format can also be passed to .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.pb') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.pb') .. code:: ipython3 import openvino as ov - + core = ov.Core() tf_model_path = "model/classification.pb" - + model_tf = core.read_model(model=tf_model_path) - compiled_model_tf = core.compile_model(model=model_tf, device_name="CPU") + compiled_model_tf = core.compile_model(model=model_tf, device_name=device.value) .. code:: ipython3 @@ -398,10 +380,10 @@ It is pre-trained model optimized to work with TensorFlow Lite. .. code:: ipython3 from pathlib import Path - + tflite_model_url = 'https://www.kaggle.com/models/tensorflow/inception/frameworks/tfLite/variations/v4-quant/versions/1?lite-format=tflite' tflite_model_path = Path('model/classification.tflite') - + download_file(tflite_model_url, filename=tflite_model_path.name, directory=tflite_model_path.parent) @@ -415,18 +397,18 @@ It is pre-trained model optimized to work with TensorFlow Lite. .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.tflite') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.tflite') .. code:: ipython3 import openvino as ov - + core = ov.Core() - + model_tflite = core.read_model(tflite_model_path) - compiled_model_tflite = core.compile_model(model=model_tflite, device_name="CPU") + compiled_model_tflite = core.compile_model(model=model_tflite, device_name=device.value) .. code:: ipython3 @@ -453,15 +435,15 @@ model form torchvision library. After conversion model using import openvino as ov import torch from torchvision.models import resnet18, ResNet18_Weights - + core = ov.Core() - + pt_model = resnet18(weights=ResNet18_Weights.IMAGENET1K_V1) example_input = torch.zeros((1, 3, 224, 224)) ov_model_pytorch = ov.convert_model(pt_model, example_input=example_input) - - compiled_model_pytorch = core.compile_model(ov_model_pytorch, device_name="CPU") - + + compiled_model_pytorch = core.compile_model(ov_model_pytorch, device_name=device.value) + ov.save_model(ov_model_pytorch, "model/exported_pytorch_model.xml") Getting Information about a Model @@ -481,7 +463,7 @@ Information about the inputs and outputs of the model are in ir_model_url = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/models/002-example-models/' ir_model_name_xml = 'classification.xml' ir_model_name_bin = 'classification.bin' - + download_file(ir_model_url + ir_model_name_xml, filename=ir_model_name_xml, directory='model') download_file(ir_model_url + ir_model_name_bin, filename=ir_model_name_bin, directory='model') @@ -500,7 +482,7 @@ Information about the inputs and outputs of the model are in .. 
parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.bin') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.bin') @@ -515,7 +497,7 @@ dictionary. .. code:: ipython3 import openvino as ov - + core = ov.Core() classification_model_xml = "model/classification.xml" model = core.read_model(model=classification_model_xml) @@ -587,7 +569,7 @@ Model Outputs .. code:: ipython3 import openvino as ov - + core = ov.Core() classification_model_xml = "model/classification.xml" model = core.read_model(model=classification_model_xml) @@ -691,7 +673,7 @@ produced data as values. ir_model_url = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/models/002-example-models/' ir_model_name_xml = 'classification.xml' ir_model_name_bin = 'classification.bin' - + download_file(ir_model_url + ir_model_name_xml, filename=ir_model_name_xml, directory='model') download_file(ir_model_url + ir_model_name_bin, filename=ir_model_name_bin, directory='model') @@ -710,18 +692,18 @@ produced data as values. .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.bin') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.bin') .. code:: ipython3 import openvino as ov - + core = ov.Core() classification_model_xml = "model/classification.xml" model = core.read_model(model=classification_model_xml) - compiled_model = core.compile_model(model=model, device_name="CPU") + compiled_model = core.compile_model(model=model, device_name=device.value) input_layer = compiled_model.input(0) output_layer = compiled_model.output(0) @@ -734,7 +716,7 @@ the input layout of the network. .. code:: ipython3 import cv2 - + image_filename = download_file( "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/coco_hollywood.jpg", directory="data" @@ -789,7 +771,7 @@ add the ``N`` dimension (where ``N``\ = 1) by calling the .. code:: ipython3 import numpy as np - + input_data = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0).astype(np.float32) input_data.shape @@ -814,10 +796,10 @@ predicted result in ``np.array`` format. # for single input models only result = compiled_model(input_data)[output_layer] - + # for multiple inputs in a list result = compiled_model([input_data])[output_layer] - + # or using a dictionary, where the key is input tensor name or index result = compiled_model({input_layer.any_name: input_data})[output_layer] @@ -878,7 +860,7 @@ input shape. ir_model_url = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/models/002-example-models/' ir_model_name_xml = 'segmentation.xml' ir_model_name_bin = 'segmentation.bin' - + download_file(ir_model_url + ir_model_name_xml, filename=ir_model_name_xml, directory='model') download_file(ir_model_url + ir_model_name_bin, filename=ir_model_name_bin, directory='model') @@ -899,27 +881,27 @@ input shape. .. 
parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/segmentation.bin') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/segmentation.bin') .. code:: ipython3 import openvino as ov - + core = ov.Core() segmentation_model_xml = "model/segmentation.xml" segmentation_model = core.read_model(model=segmentation_model_xml) segmentation_input_layer = segmentation_model.input(0) segmentation_output_layer = segmentation_model.output(0) - + print("~~~~ ORIGINAL MODEL ~~~~") print(f"input shape: {segmentation_input_layer.shape}") print(f"output shape: {segmentation_output_layer.shape}") - + new_shape = ov.PartialShape([1, 3, 544, 544]) segmentation_model.reshape({segmentation_input_layer.any_name: new_shape}) - segmentation_compiled_model = core.compile_model(model=segmentation_model, device_name="CPU") + segmentation_compiled_model = core.compile_model(model=segmentation_model, device_name=device.value) # help(segmentation_compiled_model) print("~~~~ RESHAPED MODEL ~~~~") print(f"model input shape: {segmentation_input_layer.shape}") @@ -966,15 +948,15 @@ set ``new_shape = (2,3,544,544)`` in the cell above. .. code:: ipython3 import openvino as ov - + segmentation_model_xml = "model/segmentation.xml" segmentation_model = core.read_model(model=segmentation_model_xml) segmentation_input_layer = segmentation_model.input(0) segmentation_output_layer = segmentation_model.output(0) new_shape = ov.PartialShape([2, 3, 544, 544]) segmentation_model.reshape({segmentation_input_layer.any_name: new_shape}) - segmentation_compiled_model = core.compile_model(model=segmentation_model, device_name="CPU") - + segmentation_compiled_model = core.compile_model(model=segmentation_model, device_name=device.value) + print(f"input shape: {segmentation_input_layer.shape}") print(f"output shape: {segmentation_output_layer.shape}") @@ -993,7 +975,7 @@ input image through the network to see the result: import numpy as np import openvino as ov - + core = ov.Core() segmentation_model_xml = "model/segmentation.xml" segmentation_model = core.read_model(model=segmentation_model_xml) @@ -1001,11 +983,11 @@ input image through the network to see the result: segmentation_output_layer = segmentation_model.output(0) new_shape = ov.PartialShape([2, 3, 544, 544]) segmentation_model.reshape({segmentation_input_layer.any_name: new_shape}) - segmentation_compiled_model = core.compile_model(model=segmentation_model, device_name="CPU") + segmentation_compiled_model = core.compile_model(model=segmentation_model, device_name=device.value) input_data = np.random.rand(2, 3, 544, 544) - + output = segmentation_compiled_model([input_data]) - + print(f"input data shape: {input_data.shape}") print(f"result data data shape: {segmentation_output_layer.shape}") @@ -1043,7 +1025,7 @@ the cache. ir_model_url = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/models/002-example-models/' ir_model_name_xml = 'classification.xml' ir_model_name_bin = 'classification.bin' - + download_file(ir_model_url + ir_model_name_xml, filename=ir_model_name_xml, directory='model') download_file(ir_model_url + ir_model_name_bin, filename=ir_model_name_bin, directory='model') @@ -1062,7 +1044,7 @@ the cache. .. 
parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.bin') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.bin') @@ -1070,27 +1052,30 @@ the cache. import time from pathlib import Path - + import openvino as ov - + core = ov.Core() + + cache_path = Path("model/model_cache") + cache_path.mkdir(exist_ok=True) + # Enable caching for OpenVINO Runtime. To disable caching set enable_caching = False + enable_caching = True + config_dict = {"CACHE_DIR": str(cache_path)} if enable_caching else {} + + classification_model_xml = "model/classification.xml" + model = core.read_model(model=classification_model_xml) + + start_time = time.perf_counter() + compiled_model = core.compile_model(model=model, device_name=device.value, config=config_dict) + end_time = time.perf_counter() + print(f"Loading the network to the {device.value} device took {end_time-start_time:.2f} seconds.") - device_name = "GPU" - if device_name in core.available_devices: - cache_path = Path("model/model_cache") - cache_path.mkdir(exist_ok=True) - # Enable caching for OpenVINO Runtime. To disable caching set enable_caching = False - enable_caching = True - config_dict = {"CACHE_DIR": str(cache_path)} if enable_caching else {} +.. parsed-literal:: - classification_model_xml = "model/classification.xml" - model = core.read_model(model=classification_model_xml) + Loading the network to the CPU device took 0.16 seconds. - start_time = time.perf_counter() - compiled_model = core.compile_model(model=model, device_name=device_name, config=config_dict) - end_time = time.perf_counter() - print(f"Loading the network to the {device_name} device took {end_time-start_time:.2f} seconds.") After running the previous cell, we know the model exists in the cache directory. Then, we delete the compiled model and load it again. Now, we @@ -1098,9 +1083,14 @@ measure the time it takes now. .. code:: ipython3 - if device_name in core.available_devices: - del compiled_model - start_time = time.perf_counter() - compiled_model = core.compile_model(model=model, device_name=device_name, config=config_dict) - end_time = time.perf_counter() - print(f"Loading the network to the {device_name} device took {end_time-start_time:.2f} seconds.") + del compiled_model + start_time = time.perf_counter() + compiled_model = core.compile_model(model=model, device_name=device.value, config=config_dict) + end_time = time.perf_counter() + print(f"Loading the network to the {device.value} device took {end_time-start_time:.2f} seconds.") + + +.. parsed-literal:: + + Loading the network to the CPU device took 0.09 seconds. 
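As an illustrative aside (not part of the original notebook), you can confirm that caching took effect by listing what ``compile_model()`` wrote into the cache directory; a minimal sketch, assuming the ``cache_path`` (``model/model_cache``) defined in the cell above:

.. code:: ipython3

    from pathlib import Path

    cache_path = Path("model/model_cache")

    # Each cached blob lets a later compile_model() call with the same
    # CACHE_DIR skip compilation and load the compiled model from disk.
    for cached_file in sorted(cache_path.glob("*")):
        print(f"{cached_file.name}: {cached_file.stat().st_size / 1024:.1f} KiB")

Deleting this directory forces the next ``compile_model()`` call to compile the model from scratch, so the timing difference shown above disappears.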
+ diff --git a/docs/notebooks/003-hello-segmentation-with-output.rst b/docs/notebooks/003-hello-segmentation-with-output.rst index 81b620b7ad379b..16a5c9680c233e 100644 --- a/docs/notebooks/003-hello-segmentation-with-output.rst +++ b/docs/notebooks/003-hello-segmentation-with-output.rst @@ -44,14 +44,14 @@ Imports import matplotlib.pyplot as plt import numpy as np import openvino as ov - + # Fetch `notebook_utils` module import urllib.request urllib.request.urlretrieve( url='https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/main/notebooks/utils/notebook_utils.py', filename='notebook_utils.py' ) - + from notebook_utils import segmentation_map_to_image, download_file Download model weights @@ -62,19 +62,19 @@ Download model weights .. code:: ipython3 from pathlib import Path - + base_model_dir = Path("./model").expanduser() - + model_name = "road-segmentation-adas-0001" model_xml_name = f'{model_name}.xml' model_bin_name = f'{model_name}.bin' - + model_xml_path = base_model_dir / model_xml_name - + if not model_xml_path.exists(): model_xml_url = "https://storage.openvinotoolkit.org/repositories/open_model_zoo/2023.0/models_bin/1/road-segmentation-adas-0001/FP32/road-segmentation-adas-0001.xml" model_bin_url = "https://storage.openvinotoolkit.org/repositories/open_model_zoo/2023.0/models_bin/1/road-segmentation-adas-0001/FP32/road-segmentation-adas-0001.bin" - + download_file(model_xml_url, model_xml_name, base_model_dir) download_file(model_bin_url, model_bin_name, base_model_dir) else: @@ -103,7 +103,7 @@ select device from dropdown list for running inference using OpenVINO .. code:: ipython3 import ipywidgets as widgets - + core = ov.Core() device = widgets.Dropdown( options=core.available_devices + ["AUTO"], @@ -111,7 +111,7 @@ select device from dropdown list for running inference using OpenVINO description='Device:', disabled=False, ) - + device @@ -131,10 +131,10 @@ Load the Model .. code:: ipython3 core = ov.Core() - + model = core.read_model(model=model_xml_path) compiled_model = core.compile_model(model=model, device_name=device.value) - + input_layer_ir = compiled_model.input(0) output_layer_ir = compiled_model.output(0) @@ -152,23 +152,23 @@ is provided. "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/empty_road_mapillary.jpg", directory="data" ) - + # The segmentation network expects images in BGR format. image = cv2.imread(str(image_filename)) - + rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image_h, image_w, _ = image.shape - + # N,C,H,W = batch size, number of channels, height, width. N, C, H, W = input_layer_ir.shape - + # OpenCV resize expects the destination size as (width, height). resized_image = cv2.resize(image, (W, H)) - + # Reshape to the network input shape. input_image = np.expand_dims( resized_image.transpose(2, 0, 1), 0 - ) + ) plt.imshow(rgb_image) @@ -182,7 +182,7 @@ is provided. .. parsed-literal:: - + @@ -199,7 +199,7 @@ Do Inference # Run the inference. result = compiled_model([input_image])[output_layer_ir] - + # Prepare data for visualization. segmentation_mask = np.argmax(result, axis=1) plt.imshow(segmentation_mask.transpose(1, 2, 0)) @@ -209,7 +209,7 @@ Do Inference .. parsed-literal:: - + @@ -226,14 +226,14 @@ Prepare Data for Visualization # Define colormap, each color represents a class. colormap = np.array([[68, 1, 84], [48, 103, 141], [53, 183, 120], [199, 216, 52]]) - + # Define the transparency of the segmentation mask on the photo. 
alpha = 0.3 - + # Use function from notebook_utils.py to transform mask to an RGB image. mask = segmentation_map_to_image(segmentation_mask, colormap) resized_mask = cv2.resize(mask, (image_w, image_h)) - + # Create an image with mask. image_with_mask = cv2.addWeighted(resized_mask, alpha, rgb_image, 1 - alpha, 0) @@ -246,16 +246,16 @@ Visualize data # Define titles with images. data = {"Base Photo": rgb_image, "Segmentation": mask, "Masked Photo": image_with_mask} - + # Create a subplot to visualize images. fig, axs = plt.subplots(1, len(data.items()), figsize=(15, 10)) - + # Fill the subplot. for ax, (name, image) in zip(axs, data.items()): ax.axis('off') ax.set_title(name) ax.imshow(image) - + # Display an image. plt.show(fig) diff --git a/docs/notebooks/003-hello-segmentation-with-output_files/003-hello-segmentation-with-output_11_2.png b/docs/notebooks/003-hello-segmentation-with-output_files/003-hello-segmentation-with-output_11_2.png index b25632d5a95adc..5023362b06be2d 100644 --- a/docs/notebooks/003-hello-segmentation-with-output_files/003-hello-segmentation-with-output_11_2.png +++ b/docs/notebooks/003-hello-segmentation-with-output_files/003-hello-segmentation-with-output_11_2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90fac1140a8f375892b9637d611b5b4d7e51f78a45f3af3ab3b33e74b97baa18 +oid sha256:96f0eb3a9535d57b8784be4b717dc9f280e4bf107e5b61d7cf51b36e142e4c7a size 249032 diff --git a/docs/notebooks/003-hello-segmentation-with-output_files/003-hello-segmentation-with-output_13_1.png b/docs/notebooks/003-hello-segmentation-with-output_files/003-hello-segmentation-with-output_13_1.png index bc896aa9ecad65..fe6d042ef77d30 100644 --- a/docs/notebooks/003-hello-segmentation-with-output_files/003-hello-segmentation-with-output_13_1.png +++ b/docs/notebooks/003-hello-segmentation-with-output_files/003-hello-segmentation-with-output_13_1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57f846855a237159135e3491021459d4d94b97ddb0458c74b7bc35cca5ff0d5a +oid sha256:caef59a6c15a5a1d512f4dd22395b12fbd754bba264ea5f0deae323ff8edee39 size 20550 diff --git a/docs/notebooks/003-hello-segmentation-with-output_files/003-hello-segmentation-with-output_17_0.png b/docs/notebooks/003-hello-segmentation-with-output_files/003-hello-segmentation-with-output_17_0.png index 7865ccd173a606..310b0d3545d48c 100644 --- a/docs/notebooks/003-hello-segmentation-with-output_files/003-hello-segmentation-with-output_17_0.png +++ b/docs/notebooks/003-hello-segmentation-with-output_files/003-hello-segmentation-with-output_17_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:655bbfae2c0894ca52bb1cdaf7de64bbad747265984e968afc86f0198bc6600d +oid sha256:6a3137d9359a44fb19e1900e6b808f9e7e7ded0ba209abe8c4bd90fcf37b1c6a size 260045 diff --git a/docs/notebooks/004-hello-detection-with-output.rst b/docs/notebooks/004-hello-detection-with-output.rst index b7a25d706fdb8f..989fc9866bd16e 100644 --- a/docs/notebooks/004-hello-detection-with-output.rst +++ b/docs/notebooks/004-hello-detection-with-output.rst @@ -50,14 +50,14 @@ Imports import numpy as np import openvino as ov from pathlib import Path - + # Fetch `notebook_utils` module import urllib.request urllib.request.urlretrieve( url='https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/main/notebooks/utils/notebook_utils.py', filename='notebook_utils.py' ) - + from notebook_utils import download_file Download model weights @@ -68,18 +68,18 @@ Download model weights .. 
code:: ipython3 base_model_dir = Path("./model").expanduser() - + model_name = "horizontal-text-detection-0001" model_xml_name = f'{model_name}.xml' model_bin_name = f'{model_name}.bin' - + model_xml_path = base_model_dir / model_xml_name model_bin_path = base_model_dir / model_bin_name - + if not model_xml_path.exists(): model_xml_url = "https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.3/models_bin/1/horizontal-text-detection-0001/FP32/horizontal-text-detection-0001.xml" model_bin_url = "https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.3/models_bin/1/horizontal-text-detection-0001/FP32/horizontal-text-detection-0001.bin" - + download_file(model_xml_url, model_xml_name, base_model_dir) download_file(model_bin_url, model_bin_name, base_model_dir) else: @@ -108,7 +108,7 @@ select device from dropdown list for running inference using OpenVINO .. code:: ipython3 import ipywidgets as widgets - + core = ov.Core() device = widgets.Dropdown( options=core.available_devices + ["AUTO"], @@ -116,7 +116,7 @@ select device from dropdown list for running inference using OpenVINO description='Device:', disabled=False, ) - + device @@ -136,10 +136,10 @@ Load the Model .. code:: ipython3 core = ov.Core() - + model = core.read_model(model=model_xml_path) - compiled_model = core.compile_model(model=model, device_name="CPU") - + compiled_model = core.compile_model(model=model, device_name=device.value) + input_layer_ir = compiled_model.input(0) output_layer_ir = compiled_model.output("boxes") @@ -155,19 +155,19 @@ Load an Image "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/intel_rnb.jpg", directory="data" ) - + # Text detection models expect an image in BGR format. image = cv2.imread(str(image_filename)) - + # N,C,H,W = batch size, number of channels, height, width. N, C, H, W = input_layer_ir.shape - + # Resize the image to meet network expected input sizes. resized_image = cv2.resize(image, (W, H)) - + # Reshape to the network input shape. input_image = np.expand_dims(resized_image.transpose(2, 0, 1), 0) - + plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)); @@ -190,7 +190,7 @@ Do Inference # Create an inference request. boxes = compiled_model([input_image])[output_layer_ir] - + # Remove zero only boxes. boxes = boxes[~np.all(boxes == 0, axis=1)] @@ -206,31 +206,31 @@ Visualize Results def convert_result_to_image(bgr_image, resized_image, boxes, threshold=0.3, conf_labels=True): # Define colors for boxes and descriptions. colors = {"red": (255, 0, 0), "green": (0, 255, 0)} - + # Fetch the image shapes to calculate a ratio. (real_y, real_x), (resized_y, resized_x) = bgr_image.shape[:2], resized_image.shape[:2] ratio_x, ratio_y = real_x / resized_x, real_y / resized_y - + # Convert the base image from BGR to RGB format. rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB) - + # Iterate through non-zero boxes. for box in boxes: # Pick a confidence factor from the last place in an array. conf = box[-1] if conf > threshold: # Convert float to int and multiply corner position of each box by x and y ratio. - # If the bounding box is found at the top of the image, - # position the upper box bar little lower to make it visible on the image. + # If the bounding box is found at the top of the image, + # position the upper box bar little lower to make it visible on the image. 
(x_min, y_min, x_max, y_max) = [ - int(max(corner_position * ratio_y, 10)) if idx % 2 + int(max(corner_position * ratio_y, 10)) if idx % 2 else int(corner_position * ratio_x) for idx, corner_position in enumerate(box[:-1]) ] - + # Draw a box based on the position, parameters in rectangle function are: image, start_point, end_point, color, thickness. rgb_image = cv2.rectangle(rgb_image, (x_min, y_min), (x_max, y_max), colors["green"], 3) - + # Add text to the image based on position and confidence. # Parameters in text function are: image, text, bottom-left_corner_textfield, font, font_scale, color, thickness, line_type. if conf_labels: @@ -244,7 +244,7 @@ Visualize Results 1, cv2.LINE_AA, ) - + return rgb_image .. code:: ipython3 diff --git a/docs/notebooks/004-hello-detection-with-output_files/004-hello-detection-with-output_11_1.png b/docs/notebooks/004-hello-detection-with-output_files/004-hello-detection-with-output_11_1.png index 948a5c0d52b8c1..b696d287ded448 100644 --- a/docs/notebooks/004-hello-detection-with-output_files/004-hello-detection-with-output_11_1.png +++ b/docs/notebooks/004-hello-detection-with-output_files/004-hello-detection-with-output_11_1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:538f477cad7f1fad28669553d8cac16e28f403c0eab9e95a2546b071dd2fbfa1 +oid sha256:c7a830fedc5653fd506c656144decc048cad5a7651c8e498024f0eb0ab8c8e96 size 305482 diff --git a/docs/notebooks/004-hello-detection-with-output_files/004-hello-detection-with-output_16_0.png b/docs/notebooks/004-hello-detection-with-output_files/004-hello-detection-with-output_16_0.png index 7d4e37ce9b0aff..5e6438a788597e 100644 --- a/docs/notebooks/004-hello-detection-with-output_files/004-hello-detection-with-output_16_0.png +++ b/docs/notebooks/004-hello-detection-with-output_files/004-hello-detection-with-output_16_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18a797607c8b3a7b14e6949e480c9ccd6a9629fa2026cdedfe5c4b6ab891edbd +oid sha256:edb00cb4f0e2c42cd9e0f90939afbd6352ca40c90866821898f2c42c1fd9df64 size 457214 diff --git a/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst b/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst index 1008434b092e90..f7ec33011b6e4c 100644 --- a/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst +++ b/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst @@ -8,7 +8,7 @@ Representation `__. After creating the OpenVINO IR, load the model in `OpenVINO -Runtime `__ +Runtime `__ and do inference with a sample image. Table of contents: @@ -56,33 +56,33 @@ Imports import time from pathlib import Path - + import cv2 import matplotlib.pyplot as plt import numpy as np import openvino as ov import tensorflow as tf - + # Fetch `notebook_utils` module import urllib.request urllib.request.urlretrieve( url='https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/main/notebooks/utils/notebook_utils.py', filename='notebook_utils.py' ) - + from notebook_utils import download_file .. parsed-literal:: - 2024-02-09 22:34:07.759850: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. 
- 2024-02-09 22:34:07.794264: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. + 2024-03-12 22:18:47.211125: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. + 2024-03-12 22:18:47.245088: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. .. parsed-literal:: - 2024-02-09 22:34:08.310440: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT + 2024-03-12 22:18:47.761261: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT Settings @@ -95,9 +95,9 @@ Settings # The paths of the source and converted models. model_dir = Path("model") model_dir.mkdir(exist_ok=True) - + model_path = Path("model/v3-small_224_1.0_float") - + ir_path = Path("model/v3-small_224_1.0_float.xml") Download model @@ -122,12 +122,12 @@ and save it to the disk. .. parsed-literal:: - 2024-02-09 22:34:11.190073: E tensorflow/compiler/xla/stream_executor/cuda/cuda_driver.cc:266] failed call to cuInit: CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE: forward compatibility was attempted on non supported HW - 2024-02-09 22:34:11.190106: I tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:168] retrieving CUDA diagnostic information for host: iotg-dev-workstation-07 - 2024-02-09 22:34:11.190111: I tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:175] hostname: iotg-dev-workstation-07 - 2024-02-09 22:34:11.190249: I tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:199] libcuda reported version is: 470.223.2 - 2024-02-09 22:34:11.190264: I tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:203] kernel reported version is: 470.182.3 - 2024-02-09 22:34:11.190268: E tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:312] kernel version 470.182.3 does not match DSO version 470.223.2 -- cannot find working devices in this configuration + 2024-03-12 22:18:50.501463: E tensorflow/compiler/xla/stream_executor/cuda/cuda_driver.cc:266] failed call to cuInit: CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE: forward compatibility was attempted on non supported HW + 2024-03-12 22:18:50.501497: I tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:168] retrieving CUDA diagnostic information for host: iotg-dev-workstation-07 + 2024-03-12 22:18:50.501501: I tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:175] hostname: iotg-dev-workstation-07 + 2024-03-12 22:18:50.501645: I tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:199] libcuda reported version is: 470.223.2 + 2024-03-12 22:18:50.501661: I tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:203] kernel reported version is: 470.182.3 + 2024-03-12 22:18:50.501665: E tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:312] kernel version 470.182.3 does not match DSO version 470.223.2 -- cannot find working devices in this configuration .. parsed-literal:: @@ -137,13 +137,13 @@ and save it to the disk. .. 
parsed-literal:: - 2024-02-09 22:34:15.411337: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'inputs' with dtype float and shape [?,1,1,1024] + 2024-03-12 22:18:54.762256: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'inputs' with dtype float and shape [?,1,1,1024] [[{{node inputs}}]] .. parsed-literal:: - 2024-02-09 22:34:18.568762: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'inputs' with dtype float and shape [?,1,1,1024] + 2024-03-12 22:18:57.938498: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'inputs' with dtype float and shape [?,1,1,1024] [[{{node inputs}}]] WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _jit_compiled_convolution_op, _jit_compiled_convolution_op, _jit_compiled_convolution_op while saving (showing 5 of 54). These functions will not be directly callable after loading. @@ -219,14 +219,14 @@ select device from dropdown list for running inference using OpenVINO .. code:: ipython3 import ipywidgets as widgets - + device = widgets.Dropdown( options=core.available_devices + ["AUTO"], value='AUTO', description='Device:', disabled=False, ) - + device @@ -251,7 +251,7 @@ Get Model Information input_key = compiled_model.input(0) output_key = compiled_model.output(0) - network_input_shape = input_key.shape + network_input_shape = input_key.shape Load an Image ~~~~~~~~~~~~~ @@ -268,16 +268,16 @@ network. "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/coco.jpg", directory="data" ) - + # The MobileNet network expects images in RGB format. image = cv2.cvtColor(cv2.imread(filename=str(image_filename)), code=cv2.COLOR_BGR2RGB) - + # Resize the image to the network input shape. resized_image = cv2.resize(src=image, dsize=(224, 224)) - + # Transpose the image to the network input shape. input_image = np.expand_dims(resized_image, 0) - + plt.imshow(image); @@ -299,7 +299,7 @@ Do Inference .. code:: ipython3 result = compiled_model(input_image)[output_key] - + result_index = np.argmax(result) .. code:: ipython3 @@ -309,10 +309,10 @@ Do Inference "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/datasets/imagenet/imagenet_2012.txt", directory="data" ) - + # Convert the inference result to a class name. imagenet_classes = image_filename.read_text().splitlines() - + imagenet_classes[result_index] @@ -345,15 +345,15 @@ performance. .. code:: ipython3 num_images = 1000 - + start = time.perf_counter() - + for _ in range(num_images): compiled_model([input_image]) - + end = time.perf_counter() time_ir = end - start - + print( f"IR model in OpenVINO Runtime/CPU: {time_ir/num_images:.4f} " f"seconds per image, FPS: {num_images/time_ir:.2f}" @@ -362,5 +362,5 @@ performance. .. 
parsed-literal:: - IR model in OpenVINO Runtime/CPU: 0.0011 seconds per image, FPS: 926.34 + IR model in OpenVINO Runtime/CPU: 0.0011 seconds per image, FPS: 946.40 diff --git a/docs/notebooks/101-tensorflow-classification-to-openvino-with-output_files/101-tensorflow-classification-to-openvino-with-output_19_1.png b/docs/notebooks/101-tensorflow-classification-to-openvino-with-output_files/101-tensorflow-classification-to-openvino-with-output_19_1.png index 3f302494abcb62..a142093f6e675c 100644 --- a/docs/notebooks/101-tensorflow-classification-to-openvino-with-output_files/101-tensorflow-classification-to-openvino-with-output_19_1.png +++ b/docs/notebooks/101-tensorflow-classification-to-openvino-with-output_files/101-tensorflow-classification-to-openvino-with-output_19_1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2dd4338c6c163e7693885ce544e8c9cd2aecedf3b136fa295e22877f37b5634c +oid sha256:5712bd24e962ae0e0267607554ebe1f2869c223b108876ce10e5d20fe6285126 size 387941 diff --git a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst index 7953e74307bbcd..d40df85f070d12 100644 --- a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst +++ b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst @@ -92,20 +92,20 @@ Imports import time import warnings from pathlib import Path - + import cv2 import numpy as np import openvino as ov import torch from torchvision.models.segmentation import lraspp_mobilenet_v3_large, LRASPP_MobileNet_V3_Large_Weights - + # Fetch `notebook_utils` module import urllib.request urllib.request.urlretrieve( url='https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/main/notebooks/utils/notebook_utils.py', filename='notebook_utils.py' ) - + from notebook_utils import segmentation_map_to_image, viz_result_image, SegmentationMap, Label, download_file Settings @@ -125,7 +125,7 @@ transforms function, the model is pre-trained on images with a height of DIRECTORY_NAME = "model" BASE_MODEL_NAME = DIRECTORY_NAME + "/lraspp_mobilenet_v3_large" weights_path = Path(BASE_MODEL_NAME + ".pt") - + # Paths where ONNX and OpenVINO IR models will be stored. onnx_path = weights_path.with_suffix('.onnx') if not onnx_path.parent.exists(): @@ -156,7 +156,7 @@ have not downloaded the model before. .. code:: ipython3 - print("Downloading the LRASPP MobileNetV3 model (if it has not been downloaded already)...") + print("Downloading the LRASPP MobileNetV3 model (if it has not been downloaded already)...") download_file(LRASPP_MobileNet_V3_Large_Weights.COCO_WITH_VOC_LABELS_V1.url, filename=weights_path.name, directory=weights_path.parent) # create model object model = lraspp_mobilenet_v3_large() @@ -294,12 +294,12 @@ Images need to be normalized before propagating through the network. "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/coco.jpg", directory="data" ) - + image = cv2.cvtColor(cv2.imread(str(image_filename)), cv2.COLOR_BGR2RGB) - + resized_image = cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT)) normalized_image = normalize(resized_image) - + # Convert the resized images to network input shape. input_image = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0) normalized_input_image = np.expand_dims(np.transpose(normalized_image, (2, 0, 1)), 0) @@ -331,7 +331,7 @@ on an image. 
# Instantiate OpenVINO Core core = ov.Core() - + # Read model to OpenVINO Runtime model_onnx = core.read_model(model=onnx_path) @@ -345,14 +345,14 @@ select device from dropdown list for running inference using OpenVINO .. code:: ipython3 import ipywidgets as widgets - + device = widgets.Dropdown( options=core.available_devices + ["AUTO"], value='AUTO', description='Device:', disabled=False, ) - + device @@ -368,7 +368,7 @@ select device from dropdown list for running inference using OpenVINO # Load model on device compiled_model_onnx = core.compile_model(model=model_onnx, device_name=device.value) - + # Run inference on the input image res_onnx = compiled_model_onnx([normalized_input_image])[0] @@ -403,7 +403,7 @@ be applied to each label for more convenient visualization. Label(index=20, color=(0, 64, 128), name="tv monitor") ] VOCLabels = SegmentationMap(voc_labels) - + # Convert the network result to a segmentation map and display the result. result_mask_onnx = np.squeeze(np.argmax(res_onnx, axis=1)).astype(np.uint8) viz_result_image( @@ -450,10 +450,10 @@ select device from dropdown list for running inference using OpenVINO core = ov.Core() model_ir = core.read_model(model=ir_path) compiled_model_ir = core.compile_model(model=model_ir, device_name=device.value) - + # Get input and output layers. output_layer_ir = compiled_model_ir.output(0) - + # Run inference on the input image. res_ir = compiled_model_ir([normalized_input_image])[output_layer_ir] @@ -486,7 +486,7 @@ looks the same as the output on the ONNX/OpenVINO IR models. model.eval() with torch.no_grad(): result_torch = model(torch.as_tensor(normalized_input_image).float()) - + result_mask_torch = torch.argmax(result_torch['out'], dim=1).squeeze(0).numpy().astype(np.uint8) viz_result_image( image, @@ -516,7 +516,7 @@ performance. .. code:: ipython3 num_images = 100 - + with torch.no_grad(): start = time.perf_counter() for _ in range(num_images): @@ -527,66 +527,44 @@ performance. 
f"PyTorch model on CPU: {time_torch/num_images:.3f} seconds per image, " f"FPS: {num_images/time_torch:.2f}" ) - - compiled_model_onnx = core.compile_model(model=model_onnx, device_name="CPU") + + compiled_model_onnx = core.compile_model(model=model_onnx, device_name=device.value) start = time.perf_counter() for _ in range(num_images): compiled_model_onnx([normalized_input_image]) end = time.perf_counter() time_onnx = end - start print( - f"ONNX model in OpenVINO Runtime/CPU: {time_onnx/num_images:.3f} " + f"ONNX model in OpenVINO Runtime/{device.value}: {time_onnx/num_images:.3f} " f"seconds per image, FPS: {num_images/time_onnx:.2f}" ) - - compiled_model_ir = core.compile_model(model=model_ir, device_name="CPU") + + compiled_model_ir = core.compile_model(model=model_ir, device_name=device.value) start = time.perf_counter() for _ in range(num_images): compiled_model_ir([input_image]) end = time.perf_counter() time_ir = end - start print( - f"OpenVINO IR model in OpenVINO Runtime/CPU: {time_ir/num_images:.3f} " + f"OpenVINO IR model in OpenVINO Runtime/{device.value}: {time_ir/num_images:.3f} " f"seconds per image, FPS: {num_images/time_ir:.2f}" ) - if "GPU" in core.available_devices: - compiled_model_onnx_gpu = core.compile_model(model=model_onnx, device_name="GPU") - start = time.perf_counter() - for _ in range(num_images): - compiled_model_onnx_gpu([input_image]) - end = time.perf_counter() - time_onnx_gpu = end - start - print( - f"ONNX model in OpenVINO/GPU: {time_onnx_gpu/num_images:.3f} " - f"seconds per image, FPS: {num_images/time_onnx_gpu:.2f}" - ) - - compiled_model_ir_gpu = core.compile_model(model=model_ir, device_name="GPU") - start = time.perf_counter() - for _ in range(num_images): - compiled_model_ir_gpu([input_image]) - end = time.perf_counter() - time_ir_gpu = end - start - print( - f"IR model in OpenVINO/GPU: {time_ir_gpu/num_images:.3f} " - f"seconds per image, FPS: {num_images/time_ir_gpu:.2f}" - ) .. parsed-literal:: - PyTorch model on CPU: 0.040 seconds per image, FPS: 24.69 + PyTorch model on CPU: 0.039 seconds per image, FPS: 25.55 .. parsed-literal:: - ONNX model in OpenVINO Runtime/CPU: 0.018 seconds per image, FPS: 56.48 + ONNX model in OpenVINO Runtime/AUTO: 0.018 seconds per image, FPS: 55.42 .. 
parsed-literal:: - OpenVINO IR model in OpenVINO Runtime/CPU: 0.018 seconds per image, FPS: 55.11 + OpenVINO IR model in OpenVINO Runtime/AUTO: 0.019 seconds per image, FPS: 52.62 **Show Device Information** diff --git a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output_files/102-pytorch-onnx-to-openvino-with-output_22_0.png b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output_files/102-pytorch-onnx-to-openvino-with-output_22_0.png index 7a126b5a035e8d..da104b7f7db399 100644 --- a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output_files/102-pytorch-onnx-to-openvino-with-output_22_0.png +++ b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output_files/102-pytorch-onnx-to-openvino-with-output_22_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bdc636d912f3e7146a2aacdaf744a83a90ad66778eacac5924a2499d8acb351f +oid sha256:085fc2e0ffdf71311eed99aebb0c6efafffe33ab3f01ca34c44f99feba46a565 size 465692 diff --git a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output_files/102-pytorch-onnx-to-openvino-with-output_27_0.png b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output_files/102-pytorch-onnx-to-openvino-with-output_27_0.png index 96d76cdb2702a4..910491aead8d5b 100644 --- a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output_files/102-pytorch-onnx-to-openvino-with-output_27_0.png +++ b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output_files/102-pytorch-onnx-to-openvino-with-output_27_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1481c5d43590bbae0644ab25ea265919fb1d8ecd7e4f628e25e2ef69e822c4f0 +oid sha256:fe706f0dda105330b25a3a8d1679019ef40d88c86b28a5aa2ca77449800ec939 size 465695 diff --git a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output_files/102-pytorch-onnx-to-openvino-with-output_29_0.png b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output_files/102-pytorch-onnx-to-openvino-with-output_29_0.png index 7a126b5a035e8d..da104b7f7db399 100644 --- a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output_files/102-pytorch-onnx-to-openvino-with-output_29_0.png +++ b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output_files/102-pytorch-onnx-to-openvino-with-output_29_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bdc636d912f3e7146a2aacdaf744a83a90ad66778eacac5924a2499d8acb351f +oid sha256:085fc2e0ffdf71311eed99aebb0c6efafffe33ab3f01ca34c44f99feba46a565 size 465692 diff --git a/docs/notebooks/102-pytorch-to-openvino-with-output.rst b/docs/notebooks/102-pytorch-to-openvino-with-output.rst index 8d44159c790857..2b8cf8e3426e78 100644 --- a/docs/notebooks/102-pytorch-to-openvino-with-output.rst +++ b/docs/notebooks/102-pytorch-to-openvino-with-output.rst @@ -99,23 +99,23 @@ Download input data and label map import requests from pathlib import Path from PIL import Image - + MODEL_DIR = Path("model") DATA_DIR = Path("data") - + MODEL_DIR.mkdir(exist_ok=True) DATA_DIR.mkdir(exist_ok=True) MODEL_NAME = "regnet_y_800mf" - + image = Image.open(requests.get("https://farm9.staticflickr.com/8225/8511402100_fea15da1c5_z.jpg", stream=True).raw) - + labels_file = DATA_DIR / "imagenet_2012.txt" - + if not labels_file.exists(): resp = requests.get("https://raw.githubusercontent.com/openvinotoolkit/open_model_zoo/master/data/dataset_classes/imagenet_2012.txt") with labels_file.open("wb") as f: f.write(resp.content) - + imagenet_classes = labels_file.open("r").read().splitlines() Load PyTorch Model @@ -141,14 +141,14 @@ enum ``RegNet_Y_800MF_Weights.DEFAULT``. .. 
code:: ipython3 import torchvision - + # get default weights using available weights Enum for model weights = torchvision.models.RegNet_Y_800MF_Weights.DEFAULT - + # create model topology and load weights model = torchvision.models.regnet_y_800mf(weights=weights) - - # switch model to inference mode + + # switch model to inference mode model.eval(); Prepare Input Data @@ -165,13 +165,13 @@ the first dimension. .. code:: ipython3 import torch - + # Initialize the Weight Transforms preprocess = weights.transforms() - + # Apply it to the input image img_transformed = preprocess(image) - + # Add batch dimension to image tensor input_tensor = img_transformed.unsqueeze(0) @@ -190,10 +190,10 @@ can be reused later. import numpy as np from scipy.special import softmax - + # Perform model inference on input tensor result = model(input_tensor) - + # Postprocessing function for getting results in the same way for both PyTorch model inference and OpenVINO def postprocess_result(output_tensor:np.ndarray, top_k:int = 5): """ @@ -209,10 +209,10 @@ can be reused later. topk_labels = np.argsort(softmaxed_scores)[-top_k:][::-1] topk_scores = softmaxed_scores[topk_labels] return topk_labels, topk_scores - + # Postprocess results top_labels, top_scores = postprocess_result(result.detach().numpy()) - + # Show results display(image) for idx, (label, score) in enumerate(zip(top_labels, top_scores)): @@ -241,14 +241,14 @@ Benchmark PyTorch Model Inference .. code:: ipython3 %%timeit - + # Run model inference model(input_tensor) .. parsed-literal:: - 17.6 ms ± 52.8 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) + 16.1 ms ± 389 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) Convert PyTorch Model to OpenVINO Intermediate Representation @@ -278,21 +278,21 @@ such as: and any other advanced options supported by model conversion Python API. More details can be found on this -`page `__ +`page `__ .. code:: ipython3 import openvino as ov - + # Create OpenVINO Core object instance core = ov.Core() - + # Convert model to openvino.runtime.Model object ov_model = ov.convert_model(model) - + # Save openvino.runtime.Model object on disk ov.save_model(ov_model, MODEL_DIR / f"{MODEL_NAME}_dynamic.xml") - + ov_model @@ -320,14 +320,14 @@ select device from dropdown list for running inference using OpenVINO .. code:: ipython3 import ipywidgets as widgets - + device = widgets.Dropdown( options=core.available_devices + ["AUTO"], value='AUTO', description='Device:', disabled=False, ) - + device @@ -369,10 +369,10 @@ Run OpenVINO Model Inference # Run model inference result = compiled_model(input_tensor)[0] - + # Posptorcess results top_labels, top_scores = postprocess_result(result) - + # Show results display(image) for idx, (label, score) in enumerate(zip(top_labels, top_scores)): @@ -401,13 +401,13 @@ Benchmark OpenVINO Model Inference .. code:: ipython3 %%timeit - + compiled_model(input_tensor) .. parsed-literal:: - 3.42 ms ± 7.33 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) + 3.13 ms ± 17.5 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) Convert PyTorch Model with Static Input Shape @@ -435,7 +435,7 @@ reshaping example please check the following .. 
parsed-literal:: - ] @@ -499,10 +499,10 @@ Run OpenVINO Model Inference with Static Input Shape # Run model inference result = compiled_model(input_tensor)[0] - + # Posptorcess results top_labels, top_scores = postprocess_result(result) - + # Show results display(image) for idx, (label, score) in enumerate(zip(top_labels, top_scores)): @@ -531,13 +531,13 @@ Benchmark OpenVINO Model Inference with Static Input Shape .. code:: ipython3 %%timeit - + compiled_model(input_tensor) .. parsed-literal:: - 2.9 ms ± 17.7 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) + 2.87 ms ± 22 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) Convert TorchScript Model to OpenVINO Intermediate Representation @@ -584,20 +584,20 @@ Reference `__ dataset, to OpenVINO IR. It also shows how to perform classification inference on a sample image, using `OpenVINO -Runtime `__ +Runtime `__ and compares the results of the `PaddlePaddle `__ model with the IR model. @@ -46,7 +46,7 @@ Imports .. code:: ipython3 import platform - + if platform.system() == "Windows": %pip install -q "paddlepaddle>=2.5.1,<2.6.0" else: @@ -71,9 +71,9 @@ Imports ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. paddleclas 2.5.1 requires easydict, which is not installed. - paddleclas 2.5.1 requires faiss-cpu==1.7.1.post2, but you have faiss-cpu 1.7.4 which is incompatible. + paddleclas 2.5.1 requires faiss-cpu==1.7.1.post2, but you have faiss-cpu 1.8.0 which is incompatible. paddleclas 2.5.1 requires gast==0.3.3, but you have gast 0.4.0 which is incompatible. - + .. parsed-literal:: @@ -94,16 +94,16 @@ Imports .. parsed-literal:: - --2024-02-09 22:36:08-- http://nz2.archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2.19_amd64.deb - Resolving proxy-mu.intel.com (proxy-mu.intel.com)... 10.217.247.236 - Connecting to proxy-mu.intel.com (proxy-mu.intel.com)|10.217.247.236|:911... connected. - Proxy request sent, awaiting response... + --2024-03-12 22:20:41-- http://nz2.archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2.19_amd64.deb + Resolving proxy-dmz.intel.com (proxy-dmz.intel.com)... 10.241.208.166 + Connecting to proxy-dmz.intel.com (proxy-dmz.intel.com)|10.241.208.166|:911... connected. + Proxy request sent, awaiting response... .. parsed-literal:: 404 Not Found - 2024-02-09 22:36:08 ERROR 404: Not Found. - + 2024-03-12 22:20:41 ERROR 404: Not Found. + .. parsed-literal:: @@ -116,31 +116,31 @@ Imports import time import tarfile from pathlib import Path - + import matplotlib.pyplot as plt import numpy as np import openvino as ov from paddleclas import PaddleClas from PIL import Image - + # Fetch `notebook_utils` module import urllib.request urllib.request.urlretrieve( url='https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/main/notebooks/utils/notebook_utils.py', filename='notebook_utils.py' ) - + from notebook_utils import download_file .. parsed-literal:: - 2024-02-09 22:36:10 INFO: Loading faiss with AVX2 support. + 2024-03-12 22:20:43 INFO: Loading faiss with AVX512 support. .. parsed-literal:: - 2024-02-09 22:36:10 INFO: Successfully loaded faiss with AVX2 support. + 2024-03-12 22:20:43 INFO: Successfully loaded faiss with AVX512 support. Settings @@ -168,9 +168,9 @@ PaddleHub. This may take a while. 
"https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/coco_close.png", directory="data" ) - + IMAGE_FILENAME = img.as_posix() - + MODEL_NAME = "MobileNetV3_large_x1_0" MODEL_DIR = Path("model") if not MODEL_DIR.exists(): @@ -224,7 +224,7 @@ inference on that image, and then show the top three prediction results. .. parsed-literal:: - [2024/02/09 22:36:38] ppcls WARNING: The current running environment does not support the use of GPU. CPU has been used instead. + [2024/03/12 22:21:11] ppcls WARNING: The current running environment does not support the use of GPU. CPU has been used instead. .. parsed-literal:: @@ -271,8 +271,8 @@ the same method. .. code:: ipython3 preprocess_ops = classifier.predictor.preprocess_ops - - + + def process_image(image): for op in preprocess_ops: image = op(image) @@ -294,7 +294,7 @@ clipping values. .. parsed-literal:: - 2024-02-09 22:36:39 WARNING: Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). + 2024-03-12 22:21:12 WARNING: Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). .. parsed-literal:: @@ -306,7 +306,7 @@ clipping values. .. parsed-literal:: - + @@ -363,7 +363,7 @@ select device from dropdown list for running inference using OpenVINO .. code:: ipython3 import ipywidgets as widgets - + core = ov.Core() device = widgets.Dropdown( options=core.available_devices + ["AUTO"], @@ -371,7 +371,7 @@ select device from dropdown list for running inference using OpenVINO description='Device:', disabled=False, ) - + device @@ -399,24 +399,24 @@ information. # Load OpenVINO Runtime and OpenVINO IR model core = ov.Core() model = core.read_model(model_xml) - compiled_model = core.compile_model(model=model, device_name="CPU") - + compiled_model = core.compile_model(model=model, device_name=device.value) + # Get model output output_layer = compiled_model.output(0) - + # Read, show, and preprocess input image # See the "Show Inference on PaddlePaddle Model" section for source of process_image image = Image.open(IMAGE_FILENAME) plt.imshow(image) input_image = process_image(np.array(image))[None,] - + # Do inference ov_result = compiled_model([input_image])[output_layer][0] - + # find the top three values top_indices = np.argsort(ov_result)[-3:][::-1] top_scores = ov_result[top_indices] - + # Convert the inference results to class names, using the same labels as the PaddlePaddle classifier for index, softmax_probability in zip(top_indices, top_scores): print(f"{class_id_map[index]}, {softmax_probability:.5f}") @@ -448,7 +448,7 @@ Note that many optimizations are possible to improve the performance. .. code:: ipython3 num_images = 50 - + image = Image.open(fp=IMAGE_FILENAME) .. code:: ipython3 @@ -456,7 +456,7 @@ Note that many optimizations are possible to improve the performance. # Show device information core = ov.Core() devices = core.available_devices - + for device_name in devices: device_full_name = core.get_property(device_name, "FULL_DEVICE_NAME") print(f"{device_name}: {device_full_name}") @@ -489,8 +489,8 @@ Note that many optimizations are possible to improve the performance. .. 
parsed-literal:: - PaddlePaddle model on CPU: 0.0075 seconds per image, FPS: 133.16 - + PaddlePaddle model on CPU: 0.0074 seconds per image, FPS: 134.43 + PaddlePaddle result: Labrador retriever, 0.75138 German short-haired pointer, 0.02373 @@ -528,18 +528,18 @@ select device from dropdown list for running inference using OpenVINO # Show inference speed on OpenVINO IR model compiled_model = core.compile_model(model=model, device_name=device.value) output_layer = compiled_model.output(0) - - + + start = time.perf_counter() input_image = process_image(np.array(image))[None,] for _ in range(num_images): ie_result = compiled_model([input_image])[output_layer][0] top_indices = np.argsort(ie_result)[-5:][::-1] top_softmax = ie_result[top_indices] - + end = time.perf_counter() time_ir = end - start - + print( f"OpenVINO IR model in OpenVINO Runtime ({device.value}): {time_ir/num_images:.4f} " f"seconds per image, FPS: {num_images/time_ir:.2f}" @@ -553,8 +553,8 @@ select device from dropdown list for running inference using OpenVINO .. parsed-literal:: - OpenVINO IR model in OpenVINO Runtime (AUTO): 0.0030 seconds per image, FPS: 328.87 - + OpenVINO IR model in OpenVINO Runtime (AUTO): 0.0028 seconds per image, FPS: 353.76 + OpenVINO result: Labrador retriever, 0.74909 German short-haired pointer, 0.02368 diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_15_3.png b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_15_3.png index 87f3978ae62d26..35e0c81123f0a1 100644 --- a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_15_3.png +++ b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_15_3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:37fe65815997e6f67c6a635a98caf7ad3d5066aee57709f650fe8ef4c8bdfe11 +oid sha256:99b8398ef76f2959d210e2d30bb44420f8d34a885a4480bc26e2af6627ba7119 size 120883 diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_23_1.png b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_23_1.png index 87b70a6bf1d23f..35c91e327be1ce 100644 --- a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_23_1.png +++ b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_23_1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:22866fa0a0a063c7772d4a884812ca79fb0737c1eb2281bc825ee18eded729c5 +oid sha256:1381e5922057c6bc70eb4ba9a04f3164382ad01191d320c1acbc819e7261f8c1 size 224886 diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_27_1.png b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_27_1.png index 87b70a6bf1d23f..35c91e327be1ce 100644 --- a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_27_1.png +++ b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_27_1.png @@ -1,3 +1,3 
@@ version https://git-lfs.github.com/spec/v1 -oid sha256:22866fa0a0a063c7772d4a884812ca79fb0737c1eb2281bc825ee18eded729c5 +oid sha256:1381e5922057c6bc70eb4ba9a04f3164382ad01191d320c1acbc819e7261f8c1 size 224886 diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_30_1.png b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_30_1.png index 87b70a6bf1d23f..35c91e327be1ce 100644 --- a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_30_1.png +++ b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_30_1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:22866fa0a0a063c7772d4a884812ca79fb0737c1eb2281bc825ee18eded729c5 +oid sha256:1381e5922057c6bc70eb4ba9a04f3164382ad01191d320c1acbc819e7261f8c1 size 224886 diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_8_2.png b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_8_2.png index 87b70a6bf1d23f..35c91e327be1ce 100644 --- a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_8_2.png +++ b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_8_2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:22866fa0a0a063c7772d4a884812ca79fb0737c1eb2281bc825ee18eded729c5 +oid sha256:1381e5922057c6bc70eb4ba9a04f3164382ad01191d320c1acbc819e7261f8c1 size 224886 diff --git a/docs/notebooks/104-model-tools-with-output.rst b/docs/notebooks/104-model-tools-with-output.rst index 46686ff41b1973..ed9b11c69f1329 100644 --- a/docs/notebooks/104-model-tools-with-output.rst +++ b/docs/notebooks/104-model-tools-with-output.rst @@ -53,7 +53,7 @@ OpenVINO and Open Model Zoo tools are listed in the table below. .. code:: ipython3 # Install openvino package - %pip install -q "openvino-dev>=2023.1.0" + %pip install -q "openvino-dev>=2024.0.0" .. parsed-literal:: @@ -132,7 +132,7 @@ The following settings can be changed: omz_cache_dir = Path("cache") precision = "FP16" - # Check if an iGPU is available on this system to use with Benchmark App. + # Check if a GPU is available on this system to use with Benchmark App. core = ov.Core() gpu_available = "GPU" in core.available_devices @@ -183,667 +183,470 @@ Downloading mobilenet-v2-pytorch… ################|| Downloading mobilenet-v2-pytorch ||################ - ========== Downloading model/public/mobilenet-v2-pytorch/mobilenet_v2-b0353104.pth - - -.. parsed-literal:: - - ... 0%, 32 KB, 1147 KB/s, 0 seconds passed -... 0%, 64 KB, 1047 KB/s, 0 seconds passed - -.. parsed-literal:: - - ... 0%, 96 KB, 1108 KB/s, 0 seconds passed -... 0%, 128 KB, 1467 KB/s, 0 seconds passed -... 1%, 160 KB, 1676 KB/s, 0 seconds passed -... 1%, 192 KB, 1999 KB/s, 0 seconds passed -... 1%, 224 KB, 1847 KB/s, 0 seconds passed -... 1%, 256 KB, 2102 KB/s, 0 seconds passed - -.. parsed-literal:: - - ... 2%, 288 KB, 2214 KB/s, 0 seconds passed -... 2%, 320 KB, 2445 KB/s, 0 seconds passed -... 2%, 352 KB, 2259 KB/s, 0 seconds passed -... 2%, 384 KB, 2457 KB/s, 0 seconds passed -... 2%, 416 KB, 2526 KB/s, 0 seconds passed -... 
[the remainder of the removed download-progress log is truncated here; it continues in the same pattern up to "... 100%, 13879 KB, 3629 KB/s, 3 seconds passed"]
+ ========== Downloading model/public/mobilenet-v2-pytorch/mobilenet_v2-b0353104.pth
+ [the regenerated download-progress log is likewise truncated; it reaches 100% (13,879 KB) in under a second at roughly ten times the previous throughput, and its final entries appear below]
98%, 13728 KB, 34819 KB/s, 0 seconds passed +... 99%, 13760 KB, 34876 KB/s, 0 seconds passed +... 99%, 13792 KB, 34934 KB/s, 0 seconds passed +... 99%, 13824 KB, 34991 KB/s, 0 seconds passed +... 99%, 13856 KB, 35044 KB/s, 0 seconds passed +... 100%, 13879 KB, 35077 KB/s, 0 seconds passed @@ -887,7 +690,7 @@ Converting mobilenet-v2-pytorch… .. parsed-literal:: ========== Converting mobilenet-v2-pytorch to ONNX - Conversion to ONNX command: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/bin/python -- /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/model_zoo/internal_scripts/pytorch_to_onnx.py --model-name=mobilenet_v2 --weights=model/public/mobilenet-v2-pytorch/mobilenet_v2-b0353104.pth --import-module=torchvision.models --input-shape=1,3,224,224 --output-file=model/public/mobilenet-v2-pytorch/mobilenet-v2.onnx --input-names=data --output-names=prob + Conversion to ONNX command: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/.venv/bin/python -- /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/omz_tools/internal_scripts/pytorch_to_onnx.py --model-name=mobilenet_v2 --weights=model/public/mobilenet-v2-pytorch/mobilenet_v2-b0353104.pth --import-module=torchvision.models --input-shape=1,3,224,224 --output-file=model/public/mobilenet-v2-pytorch/mobilenet-v2.onnx --input-names=data --output-names=prob @@ -900,7 +703,7 @@ Converting mobilenet-v2-pytorch… ========== Converting mobilenet-v2-pytorch to IR (FP16) - Conversion command: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/bin/python -- /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/bin/mo --framework=onnx --output_dir=model/public/mobilenet-v2-pytorch/FP16 --model_name=mobilenet-v2-pytorch --input=data '--mean_values=data[123.675,116.28,103.53]' '--scale_values=data[58.624,57.12,57.375]' --reverse_input_channels --output=prob --input_model=model/public/mobilenet-v2-pytorch/mobilenet-v2.onnx '--layout=data(NCHW)' '--input_shape=[1, 3, 224, 224]' --compress_to_fp16=True + Conversion command: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/.venv/bin/python -- /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/.venv/bin/mo --framework=onnx --output_dir=model/public/mobilenet-v2-pytorch/FP16 --model_name=mobilenet-v2-pytorch --input=data '--mean_values=data[123.675,116.28,103.53]' '--scale_values=data[58.624,57.12,57.375]' --reverse_input_channels --output=prob --input_model=model/public/mobilenet-v2-pytorch/mobilenet-v2.onnx '--layout=data(NCHW)' '--input_shape=[1, 3, 224, 224]' --compress_to_fp16=True @@ -908,14 +711,11 @@ Converting mobilenet-v2-pytorch… [ INFO ] Generated IR will be compressed to FP16. If you get lower accuracy, please consider disabling compression explicitly by adding argument --compress_to_fp16=False. Find more information about compression to FP16 at https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html - [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. 
While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html [ INFO ] MO command line tool is considered as the legacy conversion API as of OpenVINO 2023.2 release. Please use OpenVINO Model Converter (OVC). OVC represents a lightweight alternative of MO and provides simplified model conversion API. Find more information about transition from MO to OVC at https://docs.openvino.ai/2023.2/openvino_docs_OV_Converter_UG_prepare_model_convert_model_MO_OVC_transition.html [ SUCCESS ] Generated IR version 11 model. - [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/notebooks/104-model-tools/model/public/mobilenet-v2-pytorch/FP16/mobilenet-v2-pytorch.xml - [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/notebooks/104-model-tools/model/public/mobilenet-v2-pytorch/FP16/mobilenet-v2-pytorch.bin - + [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/notebooks/104-model-tools/model/public/mobilenet-v2-pytorch/FP16/mobilenet-v2-pytorch.xml + [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/notebooks/104-model-tools/model/public/mobilenet-v2-pytorch/FP16/mobilenet-v2-pytorch.bin @@ -967,10 +767,9 @@ information in a dictionary. 'description': 'MobileNet V2 is image classification model pre-trained on ImageNet dataset. This is a PyTorch* implementation of MobileNetV2 architecture as described in the paper "Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, Detection and Segmentation" .\nThe model input is a blob that consists of a single image of "1, 3, 224, 224" in "RGB" order.\nThe model output is typical object classifier for the 1000 different classifications matching with those in the ImageNet database.', 'framework': 'pytorch', 'license_url': 'https://raw.githubusercontent.com/pytorch/vision/master/LICENSE', - 'accuracy_config': '/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/model_zoo/models/public/mobilenet-v2-pytorch/accuracy-check.yml', - 'model_config': '/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/model_zoo/models/public/mobilenet-v2-pytorch/model.yml', + 'accuracy_config': '/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/omz_tools/models/public/mobilenet-v2-pytorch/accuracy-check.yml', + 'model_config': '/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/omz_tools/models/public/mobilenet-v2-pytorch/model.yml', 'precisions': ['FP16', 'FP32'], - 'quantization_output_precisions': ['FP16-INT8', 'FP32-INT8'], 'subdirectory': 'public/mobilenet-v2-pytorch', 'task_type': 'classification', 'input_info': [{'name': 'data', @@ -1039,7 +838,7 @@ seconds… [ INFO ] Parsing input parameters [Step 2/11] Loading OpenVINO Runtime [ INFO ] OpenVINO: - [ INFO ] Build ................................. 
2023.3.0-13775-ceeafaf64f3-releases/2023/3 + [ INFO ] Build ................................. 2024.0.0-14509-34caeefd078-releases/2024/0 [ INFO ] [ INFO ] Device info: @@ -1047,14 +846,18 @@ seconds… .. parsed-literal:: [ INFO ] CPU - [ INFO ] Build ................................. 2023.3.0-13775-ceeafaf64f3-releases/2023/3 + [ INFO ] Build ................................. 2024.0.0-14509-34caeefd078-releases/2024/0 [ INFO ] [ INFO ] [Step 3/11] Setting device configuration [ WARNING ] Performance hint was not explicitly specified in command line. Device(CPU) performance hint will be set to PerformanceMode.THROUGHPUT. [Step 4/11] Reading model files [ INFO ] Loading model files - [ INFO ] Read model took 28.83 ms + + +.. parsed-literal:: + + [ INFO ] Read model took 30.94 ms [ INFO ] Original model I/O parameters: [ INFO ] Model inputs: [ INFO ] data (node: data) : f32 / [N,C,H,W] / [1,3,224,224] @@ -1072,7 +875,7 @@ seconds… .. parsed-literal:: - [ INFO ] Compile model took 135.30 ms + [ INFO ] Compile model took 136.02 ms [Step 8/11] Querying optimal runtime parameters [ INFO ] Model: [ INFO ] NETWORK_NAME: main_graph @@ -1090,31 +893,30 @@ seconds… [ INFO ] ENABLE_HYPER_THREADING: True [ INFO ] EXECUTION_DEVICES: ['CPU'] [ INFO ] CPU_DENORMALS_OPTIMIZATION: False + [ INFO ] LOG_LEVEL: Level.NO [ INFO ] CPU_SPARSE_WEIGHTS_DECOMPRESSION_RATE: 1.0 + [ INFO ] DYNAMIC_QUANTIZATION_GROUP_SIZE: 0 + [ INFO ] KV_CACHE_PRECISION: [Step 9/11] Creating infer requests and preparing input tensors [ WARNING ] No input files were given for input 'data'!. This input will be filled with random values! [ INFO ] Fill input 'data' with random values [Step 10/11] Measuring performance (Start inference asynchronously, 6 inference requests, limits: 15000 ms duration) [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). - [ INFO ] First inference took 6.27 ms + [ INFO ] First inference took 6.30 ms .. parsed-literal:: [Step 11/11] Dumping statistics report - - -.. parsed-literal:: - [ INFO ] Execution Devices:['CPU'] - [ INFO ] Count: 20346 iterations - [ INFO ] Duration: 15007.48 ms + [ INFO ] Count: 20280 iterations + [ INFO ] Duration: 15006.67 ms [ INFO ] Latency: - [ INFO ] Median: 4.29 ms - [ INFO ] Average: 4.30 ms - [ INFO ] Min: 2.40 ms - [ INFO ] Max: 12.55 ms - [ INFO ] Throughput: 1355.72 FPS + [ INFO ] Median: 4.31 ms + [ INFO ] Average: 4.31 ms + [ INFO ] Min: 2.75 ms + [ INFO ] Max: 13.73 ms + [ INFO ] Throughput: 1351.40 FPS Benchmark with Different Settings @@ -1144,7 +946,7 @@ In the next cell, define the ``benchmark_model()`` function that calls ``benchmark_app``. This makes it easy to try different combinations. In the cell below that, you display available devices on the system. - **NOTE**: In this notebook, ``benchmark_app`` runs for 15 seconds to + **Note**: In this notebook, ``benchmark_app`` runs for 15 seconds to give a quick indication of performance. For more accurate performance, it is recommended to run inference for at least one minute by setting the ``t`` parameter to 60 or higher, and run @@ -1186,40 +988,43 @@ the cell below that, you display available devices on the system. CPU: Intel(R) Core(TM) i9-10920X CPU @ 3.50GHz -.. code:: ipython3 - - benchmark_model(model_path, device="CPU", seconds=15, api="async") +You can select inference device using device widget +.. 
code:: ipython3 + import ipywidgets as widgets -**Benchmark mobilenet-v2-pytorch.xml with CPU for 15 seconds with async -inference** + device = widgets.Dropdown( + options=core.available_devices + ["AUTO"], + value='CPU', + description='Device:', + disabled=False, + ) + device -Benchmark command: -``benchmark_app -m model/public/mobilenet-v2-pytorch/FP16/mobilenet-v2-pytorch.xml -d CPU -t 15 -api async -b 1`` .. parsed-literal:: - command ended + Dropdown(description='Device:', options=('CPU', 'AUTO'), value='CPU') .. code:: ipython3 - benchmark_model(model_path, device="AUTO", seconds=15, api="async") + benchmark_model(model_path, device=device.value, seconds=15, api="async") -**Benchmark mobilenet-v2-pytorch.xml with AUTO for 15 seconds with async +**Benchmark mobilenet-v2-pytorch.xml with CPU for 15 seconds with async inference** Benchmark command: -``benchmark_app -m model/public/mobilenet-v2-pytorch/FP16/mobilenet-v2-pytorch.xml -d AUTO -t 15 -api async -b 1`` +``benchmark_app -m model/public/mobilenet-v2-pytorch/FP16/mobilenet-v2-pytorch.xml -d CPU -t 15 -api async -b 1`` .. parsed-literal:: @@ -1227,25 +1032,3 @@ Benchmark command: command ended - -.. code:: ipython3 - - benchmark_model(model_path, device="GPU", seconds=15, api="async") - - - -.. raw:: html - -
Running this cell requires a GPU device, which is not available on this system. The following device is available: CPU - - -.. code:: ipython3 - - benchmark_model(model_path, device="MULTI:CPU,GPU", seconds=15, api="async") - - - -.. raw:: html - -
Running this cell requires a GPU device, which is not available on this system. The following device is available: CPU - diff --git a/docs/notebooks/105-language-quantize-bert-with-output.rst b/docs/notebooks/105-language-quantize-bert-with-output.rst index 30715d34ceeb80..d6f312c4d997f1 100644 --- a/docs/notebooks/105-language-quantize-bert-with-output.rst +++ b/docs/notebooks/105-language-quantize-bert-with-output.rst @@ -5,7 +5,7 @@ This tutorial demonstrates how to apply ``INT8`` quantization to the Natural Language Processing model known as `BERT `__, using the `Post-Training Quantization -API `__ +API `__ (NNCF library). A fine-tuned `HuggingFace BERT `__ `PyTorch `__ model, trained on the `Microsoft @@ -42,7 +42,7 @@ Table of contents: .. code:: ipython3 - %pip install -q "nncf>=2.5.0" + %pip install -q "nncf>=2.5.0" %pip install -q transformers datasets evaluate --extra-index-url https://download.pytorch.org/whl/cpu %pip install -q "openvino>=2023.1.0" @@ -75,7 +75,7 @@ Imports from zipfile import ZipFile from typing import Iterable from typing import Any - + import datasets import evaluate import numpy as np @@ -84,7 +84,7 @@ Imports import openvino as ov import torch from transformers import BertForSequenceClassification, BertTokenizer - + # Fetch `notebook_utils` module import urllib.request urllib.request.urlretrieve( @@ -96,14 +96,14 @@ Imports .. parsed-literal:: - 2024-02-09 22:38:10.763464: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. - 2024-02-09 22:38:10.797722: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. + 2024-03-12 22:22:23.157910: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. + 2024-03-12 22:22:23.191787: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. .. parsed-literal:: - 2024-02-09 22:38:11.441310: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT + 2024-03-12 22:22:23.830567: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT .. 
parsed-literal:: @@ -124,7 +124,7 @@ Settings MODEL_LINK = "https://download.pytorch.org/tutorial/MRPC.zip" FILE_NAME = MODEL_LINK.split("/")[-1] PRETRAINED_MODEL_DIR = os.path.join(MODEL_DIR, "MRPC") - + os.makedirs(DATA_DIR, exist_ok=True) os.makedirs(MODEL_DIR, exist_ok=True) @@ -169,10 +169,10 @@ PyTorch model formats are supported: input_shape = ov.PartialShape([1, -1]) ir_model_xml = Path(MODEL_DIR) / "bert_mrpc.xml" core = ov.Core() - + torch_model = BertForSequenceClassification.from_pretrained(PRETRAINED_MODEL_DIR) torch_model.eval - + input_info = [("input_ids", input_shape, np.int64),("attention_mask", input_shape, np.int64),("token_type_ids", input_shape, np.int64)] default_input = torch.ones(1, MAX_SEQ_LENGTH, dtype=torch.int64) inputs = { @@ -180,7 +180,7 @@ PyTorch model formats are supported: "attention_mask": default_input, "token_type_ids": default_input, } - + # Convert the PyTorch model to OpenVINO IR FP32. if not ir_model_xml.exists(): model = ov.convert_model(torch_model, example_input=inputs, input=input_info) @@ -191,7 +191,7 @@ PyTorch model formats are supported: .. parsed-literal:: - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() return self.fget.__get__(instance, owner)() @@ -215,6 +215,12 @@ PyTorch model formats are supported: No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda' +.. parsed-literal:: + + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/modeling_utils.py:4193: FutureWarning: `_is_quantized_training_enabled` is going to be deprecated in transformers 4.39.0. Please use `model.hf_quantizer.is_trainable` instead + warnings.warn( + + Prepare the Dataset ------------------- @@ -230,16 +236,16 @@ tokenizer from HuggingFace. def create_data_source(): raw_dataset = datasets.load_dataset('glue', 'mrpc', split='validation') tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_DIR) - + def _preprocess_fn(examples): texts = (examples['sentence1'], examples['sentence2']) result = tokenizer(*texts, padding='max_length', max_length=MAX_SEQ_LENGTH, truncation=True) result['labels'] = examples['label'] return result processed_dataset = raw_dataset.map(_preprocess_fn, batched=True, batch_size=1) - + return processed_dataset - + data_source = create_data_source() Optimize model using NNCF Post-training Quantization API @@ -261,7 +267,7 @@ The optimization process contains the following steps: .. code:: ipython3 INPUT_NAMES = [key for key in inputs.keys()] - + def transform_fn(data_item): """ Extract the model's input from the data item. 
@@ -272,7 +278,7 @@ The optimization process contains the following steps: name: np.asarray([data_item[name]], dtype=np.int64) for name in INPUT_NAMES } return inputs - + calibration_dataset = nncf.Dataset(data_source, transform_fn) # Quantize the model. By specifying model_type, we specify additional transformer patterns in the model. quantized_model = nncf.quantize(model, calibration_dataset, @@ -286,7 +292,9 @@ The optimization process contains the following steps: +.. raw:: html +

 
 
 
@@ -305,7 +313,9 @@ The optimization process contains the following steps:
 
 
 
+.. raw:: html
 
+    

 
 
 
@@ -334,7 +344,9 @@ The optimization process contains the following steps:
 
 
 
+.. raw:: html
 
+    

 
 
 
@@ -353,7 +365,9 @@ The optimization process contains the following steps:
 
 
 
+.. raw:: html
 
+    

 
 
 
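The progress bars rendered above are emitted by the ``nncf.quantize()`` call shown earlier in this notebook, whose argument list is split across hunks here. As a minimal sketch only (it reuses the ``model``, ``data_source`` and ``transform_fn`` objects defined above; the output filename is hypothetical and other arguments are left at their defaults), the core API usage looks like this:

.. code:: ipython3

    import nncf
    import openvino as ov

    # Wrap the MRPC validation samples so NNCF can pull calibration inputs
    # through transform_fn during statistics collection.
    calibration_dataset = nncf.Dataset(data_source, transform_fn)

    # model_type=TRANSFORMER enables the additional transformer-specific
    # quantization patterns mentioned in the text above.
    quantized_model = nncf.quantize(
        model,
        calibration_dataset,
        model_type=nncf.ModelType.TRANSFORMER,
    )

    # Hypothetical output path for the resulting INT8 IR.
    ov.save_model(quantized_model, "model/quantized_bert_mrpc.xml")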
@@ -392,14 +406,14 @@ select device from dropdown list for running inference using OpenVINO
 .. code:: ipython3
 
     import ipywidgets as widgets
-
+    
     device = widgets.Dropdown(
         options=core.available_devices + ["AUTO"],
         value='AUTO',
         description='Device:',
         disabled=False,
     )
-
+    
     device
 
 
@@ -427,10 +441,10 @@ changing ``sample_idx`` to another value (from 0 to 407).
     sample_idx = 5
     sample = data_source[sample_idx]
     inputs = {k: torch.unsqueeze(torch.tensor(sample[k]), 0) for k in ['input_ids', 'token_type_ids', 'attention_mask']}
-
+    
     result = compiled_quantized_model(inputs)[output_layer]
     result = np.argmax(result)
-
+    
     print(f"Text 1: {sample['sentence1']}")
     print(f"Text 2: {sample['sentence2']}")
     print(f"The same meaning: {'yes' if result == 1 else 'no'}")
@@ -452,12 +466,12 @@ Compare F1-score of FP32 and INT8 models
 
     def validate(model: ov.Model, dataset: Iterable[Any]) -> float:
         """
-        Evaluate the model on GLUE dataset.
+        Evaluate the model on GLUE dataset. 
         Returns F1 score metric.
         """
         compiled_model = core.compile_model(model, device_name=device.value)
         output_layer = compiled_model.output(0)
-
+    
         metric = evaluate.load('glue', 'mrpc')
         for batch in dataset:
             inputs = [
@@ -468,14 +482,14 @@ Compare F1-score of FP32 and INT8 models
             metric.add_batch(predictions=[predictions], references=[batch['labels']])
         metrics = metric.compute()
         f1_score = metrics['f1']
-
+    
         return f1_score
-
-
+    
+    
     print('Checking the accuracy of the original model:')
     metric = validate(model, data_source)
     print(f'F1 score: {metric:.4f}')
-
+    
     print('Checking the accuracy of the quantized model:')
     metric = validate(quantized_model, data_source)
     print(f'F1 score: {metric:.4f}')
@@ -517,7 +531,7 @@ Frames Per Second (FPS) for images.
     num_samples = 50
     sample = data_source[0]
     inputs = {k: torch.unsqueeze(torch.tensor(sample[k]), 0) for k in ['input_ids', 'token_type_ids', 'attention_mask']}
-
+    
     with torch.no_grad():
         start = time.perf_counter()
         for _ in range(num_samples):
@@ -528,7 +542,7 @@ Frames Per Second (FPS) for images.
         f"PyTorch model on CPU: {time_torch / num_samples:.3f} seconds per sentence, "
         f"SPS: {num_samples / time_torch:.2f}"
     )
-
+    
     start = time.perf_counter()
     for _ in range(num_samples):
         compiled_model(inputs)
@@ -538,7 +552,7 @@ Frames Per Second (FPS) for images.
         f"IR FP32 model in OpenVINO Runtime/{device.value}: {time_ir / num_samples:.3f} "
         f"seconds per sentence, SPS: {num_samples / time_ir:.2f}"
     )
-
+    
     start = time.perf_counter()
     for _ in range(num_samples):
         compiled_quantized_model(inputs)
@@ -557,17 +571,17 @@ Frames Per Second (FPS) for images.
 
 .. parsed-literal::
 
-    PyTorch model on CPU: 0.073 seconds per sentence, SPS: 13.77
+    PyTorch model on CPU: 0.073 seconds per sentence, SPS: 13.63
 
 
 .. parsed-literal::
 
-    IR FP32 model in OpenVINO Runtime/AUTO: 0.021 seconds per sentence, SPS: 47.89
+    IR FP32 model in OpenVINO Runtime/AUTO: 0.020 seconds per sentence, SPS: 48.91
 
 
 .. parsed-literal::
 
-    OpenVINO IR INT8 model in OpenVINO Runtime/AUTO: 0.009 seconds per sentence, SPS: 109.72
+    OpenVINO IR INT8 model in OpenVINO Runtime/AUTO: 0.009 seconds per sentence, SPS: 112.60
 
 
 Finally, measure the inference performance of OpenVINO ``FP32`` and
@@ -575,7 +589,7 @@ Finally, measure the inference performance of OpenVINO ``FP32`` and
 Tool `__
 in OpenVINO.
 
-   **NOTE**: The ``benchmark_app`` tool is able to measure the
+   **Note**: The ``benchmark_app`` tool is able to measure the
    performance of the OpenVINO Intermediate Representation (OpenVINO IR)
    models only. For more accurate performance, run ``benchmark_app`` in
    a terminal/command prompt after closing other applications. Run
@@ -597,24 +611,24 @@ in OpenVINO.
     [Step 2/11] Loading OpenVINO Runtime
     [ WARNING ] Default duration 120 seconds is used for unknown device device.value
     [ INFO ] OpenVINO:
-    [ INFO ] Build ................................. 2023.3.0-13775-ceeafaf64f3-releases/2023/3
-    [ INFO ]
+    [ INFO ] Build ................................. 2024.0.0-14509-34caeefd078-releases/2024/0
+    [ INFO ] 
     [ INFO ] Device info:
-    [ INFO ]
-    [ INFO ]
+    [ INFO ] 
+    [ INFO ] 
     [Step 3/11] Setting device configuration
-    [ ERROR ] Exception from src/inference/src/core.cpp:228:
-    Exception from src/inference/src/dev/core_impl.cpp:560:
+    [ ERROR ] Exception from src/inference/src/cpp/core.cpp:216:
+    Exception from src/inference/src/dev/core_impl.cpp:556:
     Device with "device" name is not registered in the OpenVINO Runtime
-
+    
     Traceback (most recent call last):
-      File "/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/tools/benchmark/main.py", line 166, in main
+      File "/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/tools/benchmark/main.py", line 166, in main
         supported_properties = benchmark.core.get_property(device, properties.supported_properties())
-    RuntimeError: Exception from src/inference/src/core.cpp:228:
-    Exception from src/inference/src/dev/core_impl.cpp:560:
+    RuntimeError: Exception from src/inference/src/cpp/core.cpp:216:
+    Exception from src/inference/src/dev/core_impl.cpp:556:
     Device with "device" name is not registered in the OpenVINO Runtime
-
-
+    
+    
 
 
 .. code:: ipython3
@@ -630,22 +644,22 @@ in OpenVINO.
     [Step 2/11] Loading OpenVINO Runtime
     [ WARNING ] Default duration 120 seconds is used for unknown device device.value
     [ INFO ] OpenVINO:
-    [ INFO ] Build ................................. 2023.3.0-13775-ceeafaf64f3-releases/2023/3
-    [ INFO ]
+    [ INFO ] Build ................................. 2024.0.0-14509-34caeefd078-releases/2024/0
+    [ INFO ] 
     [ INFO ] Device info:
-    [ INFO ]
-    [ INFO ]
+    [ INFO ] 
+    [ INFO ] 
     [Step 3/11] Setting device configuration
-    [ ERROR ] Exception from src/inference/src/core.cpp:228:
-    Exception from src/inference/src/dev/core_impl.cpp:560:
+    [ ERROR ] Exception from src/inference/src/cpp/core.cpp:216:
+    Exception from src/inference/src/dev/core_impl.cpp:556:
     Device with "device" name is not registered in the OpenVINO Runtime
-
+    
     Traceback (most recent call last):
-      File "/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/tools/benchmark/main.py", line 166, in main
+      File "/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/tools/benchmark/main.py", line 166, in main
         supported_properties = benchmark.core.get_property(device, properties.supported_properties())
-    RuntimeError: Exception from src/inference/src/core.cpp:228:
-    Exception from src/inference/src/dev/core_impl.cpp:560:
+    RuntimeError: Exception from src/inference/src/cpp/core.cpp:216:
+    Exception from src/inference/src/dev/core_impl.cpp:556:
     Device with "device" name is not registered in the OpenVINO Runtime
-
-
+    
+    
 
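In both ``benchmark_app`` runs above, the warning ``Default duration 120 seconds is used for unknown device device.value`` and the subsequent ``Device with "device" name is not registered`` error show that the literal text ``device.value`` reached the tool instead of the device chosen in the dropdown. A minimal sketch, assuming the ``device`` widget and ``ir_model_xml`` path defined earlier in this notebook (the flags shown are illustrative), of how the selected device is typically interpolated into the command from an IPython cell:

.. code:: ipython3

    # Sketch only: pass the dropdown's current value to benchmark_app rather
    # than the literal string "device.value", so the device lookup succeeds.
    ! benchmark_app -m $ir_model_xml -d {device.value} -api sync -t 15

With the value interpolated this way, ``benchmark_app`` receives a registered device name such as ``CPU`` or ``AUTO``.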
diff --git a/docs/notebooks/106-auto-device-with-output.rst b/docs/notebooks/106-auto-device-with-output.rst
index 15bd71f3f2e9f7..d3268cb229f281 100644
--- a/docs/notebooks/106-auto-device-with-output.rst
+++ b/docs/notebooks/106-auto-device-with-output.rst
@@ -81,13 +81,13 @@ Import modules and create Core
 
     import time
     import sys
-
+    
     import openvino as ov
-
+    
     from IPython.display import Markdown, display
-
+    
     core = ov.Core()
-
+    
     if "GPU" not in core.available_devices:
         display(Markdown('
Warning: A GPU device is not available. This notebook requires GPU device to have meaningful results.
')) @@ -127,11 +127,11 @@ For more information about model conversion API, see this import torchvision from pathlib import Path - + base_model_dir = Path("./model") base_model_dir.mkdir(exist_ok=True) model_path = base_model_dir / "resnet50.xml" - + if not model_path.exists(): pt_model = torchvision.models.resnet50(weights="DEFAULT") ov_model = ov.convert_model(pt_model, input=[[1,3,224,224]]) @@ -164,24 +164,25 @@ By default, ``compile_model`` API will select **AUTO** as # Set LOG_LEVEL to LOG_INFO. core.set_property("AUTO", {"LOG_LEVEL":"LOG_INFO"}) - + # Load the model onto the target device. compiled_model = core.compile_model(ov_model) - + if isinstance(compiled_model, ov.CompiledModel): - print("Successfully compiled model without a device_name.") + print("Successfully compiled model without a device_name.") .. parsed-literal:: - [22:41:31.9445]I[plugin.cpp:536][AUTO] device:CPU, config:PERFORMANCE_HINT=LATENCY - [22:41:31.9445]I[plugin.cpp:536][AUTO] device:CPU, config:PERFORMANCE_HINT_NUM_REQUESTS=0 - [22:41:31.9445]I[plugin.cpp:536][AUTO] device:CPU, config:PERF_COUNT=NO - [22:41:31.9445]I[plugin.cpp:541][AUTO] device:CPU, priority:0 - [22:41:31.9446]I[schedule.cpp:17][AUTO] scheduler starting - [22:41:31.9446]I[auto_schedule.cpp:131][AUTO] select device:CPU - [22:41:32.0858]I[auto_schedule.cpp:109][AUTO] device:CPU compiling model finished - [22:41:32.0860]I[plugin.cpp:569][AUTO] underlying hardware does not support hardware context + [22:24:04.4814]I[plugin.cpp:418][AUTO] device:CPU, config:LOG_LEVEL=LOG_INFO + [22:24:04.4815]I[plugin.cpp:418][AUTO] device:CPU, config:PERFORMANCE_HINT=LATENCY + [22:24:04.4815]I[plugin.cpp:418][AUTO] device:CPU, config:PERFORMANCE_HINT_NUM_REQUESTS=0 + [22:24:04.4815]I[plugin.cpp:418][AUTO] device:CPU, config:PERF_COUNT=NO + [22:24:04.4815]I[plugin.cpp:423][AUTO] device:CPU, priority:0 + [22:24:04.4815]I[schedule.cpp:17][AUTO] scheduler starting + [22:24:04.4815]I[auto_schedule.cpp:131][AUTO] select device:CPU + [22:24:04.6260]I[auto_schedule.cpp:109][AUTO] device:CPU compiling model finished + [22:24:04.6262]I[plugin.cpp:451][AUTO] underlying hardware does not support hardware context Successfully compiled model without a device_name. @@ -194,8 +195,8 @@ By default, ``compile_model`` API will select **AUTO** as .. parsed-literal:: + [22:24:04.6373]I[schedule.cpp:303][AUTO] scheduler ending Deleted compiled_model - [22:41:32.0982]I[schedule.cpp:303][AUTO] scheduler ending Explicitly pass AUTO as device_name to Core::compile_model API @@ -210,9 +211,9 @@ improve readability of your code. # Set LOG_LEVEL to LOG_NONE. core.set_property("AUTO", {"LOG_LEVEL":"LOG_NONE"}) - + compiled_model = core.compile_model(model=ov_model, device_name="AUTO") - + if isinstance(compiled_model, ov.CompiledModel): print("Successfully compiled model using AUTO.") @@ -271,16 +272,16 @@ function, we will reuse it for preparing input data. .. code:: ipython3 from PIL import Image - + # Download the image from the openvino_notebooks storage image_filename = download_file( "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/coco.jpg", directory="data" ) - + image = Image.open(str(image_filename)) input_transform = torchvision.models.ResNet50_Weights.DEFAULT.transforms() - + input_tensor = input_transform(image) input_tensor = input_tensor.unsqueeze(0).numpy() image @@ -307,14 +308,14 @@ Load the model to GPU device and perform inference if "GPU" not in core.available_devices: print(f"A GPU device is not available. 
Available devices are: {core.available_devices}") - else : + else : # Start time. gpu_load_start_time = time.perf_counter() compiled_model = core.compile_model(model=ov_model, device_name="GPU") # load to GPU - + # Execute the first inference. results = compiled_model(input_tensor)[0] - + # Measure time to the first inference. gpu_fil_end_time = time.perf_counter() gpu_fil_span = gpu_fil_end_time - gpu_load_start_time @@ -340,11 +341,11 @@ executed on CPU until GPU is ready. # Start time. auto_load_start_time = time.perf_counter() compiled_model = core.compile_model(model=ov_model) # The device_name is AUTO by default. - + # Execute the first inference. results = compiled_model(input_tensor)[0] - - + + # Measure time to the first inference. auto_fil_end_time = time.perf_counter() auto_fil_span = auto_fil_end_time - auto_load_start_time @@ -406,11 +407,11 @@ Class and callback definition """ self.fps = 0 self.latency = 0 - + self.start_time = time.perf_counter() self.latency_list = [] self.interval = interval - + def update(self, infer_request: ov.InferRequest) -> bool: """ Update the metrics if current ongoing @interval seconds duration is expired. Record the latency only if it is not expired. @@ -432,12 +433,12 @@ Class and callback definition return True else : return False - - + + class InferContext: """ Inference context. Record and update peforamnce metrics via @metrics, set @feed_inference to False once @remaining_update_num <=0 - :member: metrics: instance of class PerformanceMetrics + :member: metrics: instance of class PerformanceMetrics :member: remaining_update_num: the remaining times for peforamnce metrics updating. :member: feed_inference: if feed inference request is required or not. """ @@ -452,7 +453,7 @@ Class and callback definition self.metrics = PerformanceMetrics(update_interval) self.remaining_update_num = num self.feed_inference = True - + def update(self, infer_request: ov.InferRequest): """ Update the context. Set @feed_inference to False if the number of remaining performance metric updates (@remaining_update_num) reaches 0 @@ -461,13 +462,13 @@ Class and callback definition """ if self.remaining_update_num <= 0 : self.feed_inference = False - + if self.metrics.update(infer_request) : self.remaining_update_num = self.remaining_update_num - 1 if self.remaining_update_num <= 0 : self.feed_inference = False - - + + def completion_callback(infer_request: ov.InferRequest, context) -> None: """ callback for the inference request, pass the @infer_request to @context for updating @@ -476,8 +477,8 @@ Class and callback definition :returns: None """ context.update(infer_request) - - + + # Performance metrics update interval (seconds) and number of times. metrics_update_interval = 10 metrics_update_num = 6 @@ -493,29 +494,29 @@ Loop for inference and update the FPS/Latency every .. code:: ipython3 THROUGHPUT_hint_context = InferContext(metrics_update_interval, metrics_update_num) - + print("Compiling Model for AUTO device with THROUGHPUT hint") sys.stdout.flush() - + compiled_model = core.compile_model(model=ov_model, config={"PERFORMANCE_HINT":"THROUGHPUT"}) - + infer_queue = ov.AsyncInferQueue(compiled_model, 0) # Setting to 0 will query optimal number by default. 
infer_queue.set_callback(completion_callback) - + print(f"Start inference, {metrics_update_num: .0f} groups of FPS/latency will be measured over {metrics_update_interval: .0f}s intervals") sys.stdout.flush() - + while THROUGHPUT_hint_context.feed_inference: infer_queue.start_async(input_tensor, THROUGHPUT_hint_context) - + infer_queue.wait_all() - + # Take the FPS and latency of the latest period. THROUGHPUT_hint_fps = THROUGHPUT_hint_context.metrics.fps THROUGHPUT_hint_latency = THROUGHPUT_hint_context.metrics.latency - + print("Done") - + del compiled_model @@ -531,32 +532,32 @@ Loop for inference and update the FPS/Latency every .. parsed-literal:: - throughput: 179.69fps, latency: 31.58ms, time interval: 10.00s + throughput: 181.84fps, latency: 31.28ms, time interval: 10.00s .. parsed-literal:: - throughput: 182.30fps, latency: 32.10ms, time interval: 10.00s + throughput: 183.53fps, latency: 31.88ms, time interval: 10.01s .. parsed-literal:: - throughput: 180.62fps, latency: 32.36ms, time interval: 10.02s + throughput: 182.49fps, latency: 32.10ms, time interval: 10.00s .. parsed-literal:: - throughput: 179.76fps, latency: 32.61ms, time interval: 10.00s + throughput: 183.45fps, latency: 31.92ms, time interval: 10.00s .. parsed-literal:: - throughput: 180.36fps, latency: 32.36ms, time interval: 10.02s + throughput: 182.69fps, latency: 32.02ms, time interval: 10.00s .. parsed-literal:: - throughput: 179.77fps, latency: 32.58ms, time interval: 10.00s + throughput: 181.11fps, latency: 32.34ms, time interval: 10.02s .. parsed-literal:: @@ -575,30 +576,30 @@ Loop for inference and update the FPS/Latency for each .. code:: ipython3 LATENCY_hint_context = InferContext(metrics_update_interval, metrics_update_num) - + print("Compiling Model for AUTO Device with LATENCY hint") sys.stdout.flush() - + compiled_model = core.compile_model(model=ov_model, config={"PERFORMANCE_HINT":"LATENCY"}) - + # Setting to 0 will query optimal number by default. infer_queue = ov.AsyncInferQueue(compiled_model, 0) infer_queue.set_callback(completion_callback) - + print(f"Start inference, {metrics_update_num: .0f} groups fps/latency will be out with {metrics_update_interval: .0f}s interval") sys.stdout.flush() - + while LATENCY_hint_context.feed_inference: infer_queue.start_async(input_tensor, LATENCY_hint_context) - + infer_queue.wait_all() - + # Take the FPS and latency of the latest period. LATENCY_hint_fps = LATENCY_hint_context.metrics.fps LATENCY_hint_latency = LATENCY_hint_context.metrics.latency - + print("Done") - + del compiled_model @@ -614,32 +615,32 @@ Loop for inference and update the FPS/Latency for each .. parsed-literal:: - throughput: 139.27fps, latency: 6.65ms, time interval: 10.00s + throughput: 138.83fps, latency: 6.64ms, time interval: 10.01s .. parsed-literal:: - throughput: 141.22fps, latency: 6.62ms, time interval: 10.01s + throughput: 141.03fps, latency: 6.64ms, time interval: 10.00s .. parsed-literal:: - throughput: 140.71fps, latency: 6.64ms, time interval: 10.01s + throughput: 140.77fps, latency: 6.64ms, time interval: 10.00s .. parsed-literal:: - throughput: 141.11fps, latency: 6.63ms, time interval: 10.01s + throughput: 141.80fps, latency: 6.65ms, time interval: 10.01s .. parsed-literal:: - throughput: 141.26fps, latency: 6.62ms, time interval: 10.00s + throughput: 142.26fps, latency: 6.66ms, time interval: 10.00s .. parsed-literal:: - throughput: 141.18fps, latency: 6.63ms, time interval: 10.00s + throughput: 141.36fps, latency: 6.63ms, time interval: 10.00s .. 
parsed-literal:: @@ -655,21 +656,21 @@ Difference in FPS and latency .. code:: ipython3 import matplotlib.pyplot as plt - + TPUT = 0 LAT = 1 labels = ["THROUGHPUT hint", "LATENCY hint"] - - fig1, ax1 = plt.subplots(1, 1) + + fig1, ax1 = plt.subplots(1, 1) fig1.patch.set_visible(False) - ax1.axis('tight') - ax1.axis('off') - + ax1.axis('tight') + ax1.axis('off') + cell_text = [] cell_text.append(['%.2f%s' % (THROUGHPUT_hint_fps," FPS"), '%.2f%s' % (THROUGHPUT_hint_latency, " ms")]) cell_text.append(['%.2f%s' % (LATENCY_hint_fps," FPS"), '%.2f%s' % (LATENCY_hint_latency, " ms")]) - - table = ax1.table(cellText=cell_text, colLabels=["FPS (Higher is better)", "Latency (Lower is better)"], rowLabels=labels, + + table = ax1.table(cellText=cell_text, colLabels=["FPS (Higher is better)", "Latency (Lower is better)"], rowLabels=labels, rowColours=["deepskyblue"] * 2, colColours=["deepskyblue"] * 2, cellLoc='center', loc='upper left') table.auto_set_font_size(False) @@ -677,7 +678,7 @@ Difference in FPS and latency table.auto_set_column_width(0) table.auto_set_column_width(1) table.scale(1, 3) - + fig1.tight_layout() plt.show() @@ -691,28 +692,28 @@ Difference in FPS and latency # Output the difference. width = 0.4 fontsize = 14 - + plt.rc('font', size=fontsize) fig, ax = plt.subplots(1,2, figsize=(10, 8)) - + rects1 = ax[0].bar([0], THROUGHPUT_hint_fps, width, label=labels[TPUT], color='#557f2d') rects2 = ax[0].bar([width], LATENCY_hint_fps, width, label=labels[LAT]) ax[0].set_ylabel("frames per second") - ax[0].set_xticks([width / 2]) + ax[0].set_xticks([width / 2]) ax[0].set_xticklabels(["FPS"]) ax[0].set_xlabel("Higher is better") - + rects1 = ax[1].bar([0], THROUGHPUT_hint_latency, width, label=labels[TPUT], color='#557f2d') rects2 = ax[1].bar([width], LATENCY_hint_latency, width, label=labels[LAT]) ax[1].set_ylabel("milliseconds") ax[1].set_xticks([width / 2]) ax[1].set_xticklabels(["Latency (ms)"]) ax[1].set_xlabel("Lower is better") - + fig.suptitle('Performance Hints') fig.legend(labels, fontsize=fontsize) fig.tight_layout() - + plt.show() diff --git a/docs/notebooks/106-auto-device-with-output_files/106-auto-device-with-output_27_0.png b/docs/notebooks/106-auto-device-with-output_files/106-auto-device-with-output_27_0.png index 73dff3229ae4f3..dda596f1105e3e 100644 --- a/docs/notebooks/106-auto-device-with-output_files/106-auto-device-with-output_27_0.png +++ b/docs/notebooks/106-auto-device-with-output_files/106-auto-device-with-output_27_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:acc35ef9a1d6c2acf74782263f65bd81b65d7fd6663a146ffb88c9a9b64e343d -size 26542 +oid sha256:8ce150e30514fd5c85f9d29b953b41d9cbd0c4f58c2289513286a4aef99c3984 +size 26193 diff --git a/docs/notebooks/106-auto-device-with-output_files/106-auto-device-with-output_28_0.png b/docs/notebooks/106-auto-device-with-output_files/106-auto-device-with-output_28_0.png index 6ea25daababa4a..682ea8ddc7501a 100644 --- a/docs/notebooks/106-auto-device-with-output_files/106-auto-device-with-output_28_0.png +++ b/docs/notebooks/106-auto-device-with-output_files/106-auto-device-with-output_28_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab4df6ece8c31c9c5d5ab7d51b7d628ef7fb5df7193d859b8cfd9547273b76e3 -size 39977 +oid sha256:7b1eaf56574fadd9331d380d2f057dd74775ebacdaa34fff9a1af2fe382128ab +size 40039 diff --git a/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst b/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst index 
7539979e60dd26..38d945e74af729 100644 --- a/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst +++ b/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst @@ -1,5 +1,5 @@ -Quantize Speech Recognition Models using NNCF PTQ API -===================================================== +Quantize Data2Vec Speech Recognition Model using NNCF PTQ API +============================================================= This tutorial demonstrates how to use the NNCF (Neural Network Compression Framework) 8-bit quantization in post-training mode (without @@ -80,13 +80,13 @@ model specific pre- and post-processing steps. .. code:: ipython3 %pip install -q "openvino>=2023.3.0" "nncf>=2.7" - %pip install datasets "torchmetrics>=0.11.0" "torch>=2.1.0" --extra-index-url https://download.pytorch.org/whl/cpu + %pip install -q datasets "torchmetrics>=0.11.0" "torch>=2.1.0" --extra-index-url https://download.pytorch.org/whl/cpu %pip install -q soundfile librosa "transformers>=4.36.2" --extra-index-url https://download.pytorch.org/whl/cpu .. code:: ipython3 from transformers import Wav2Vec2Processor, Data2VecAudioForCTC - + processor = Wav2Vec2Processor.from_pretrained("facebook/data2vec-audio-base-960h") model = Data2VecAudioForCTC.from_pretrained("facebook/data2vec-audio-base-960h") @@ -106,14 +106,14 @@ Convert model to OpenVINO Intermediate Representation import openvino as ov import torch - + core = ov.Core() - + BATCH_SIZE = 1 MAX_SEQ_LENGTH = 30480 - + ir_model_path = MODEL_DIR / "data2vec-audo-base.xml" - + if not ir_model_path.exists(): ov_model = ov.convert_model(model, example_input=torch.zeros([1, MAX_SEQ_LENGTH], dtype=torch.float)) ov.save_model(ov_model, str(ir_model_path)) @@ -137,21 +137,21 @@ dataset. .. code:: ipython3 from datasets import load_dataset - + ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation") - - + + # define preprocessing function for converting audio to input values for model def map_to_input(batch): preprocessed_signal = processor(batch["audio"]["array"], return_tensors="pt", padding="longest", sampling_rate=batch['audio']['sampling_rate']) input_values = preprocessed_signal.input_values batch['input_values'] = input_values return batch - - + + # apply preprocessing function to dataset and remove audio column, to save memory as we do not need it anymore dataset = ds.map(map_to_input, batched=False, remove_columns=["audio"]) - + test_sample = ds[0]["audio"] Check model inference result @@ -172,8 +172,8 @@ For reference, see the same function provided for OpenVINO model. .. code:: ipython3 import numpy as np - - + + # inference function for pytorch def torch_infer(model, sample): logits = model(torch.Tensor(sample['input_values'])).logits @@ -181,8 +181,8 @@ For reference, see the same function provided for OpenVINO model. predicted_ids = torch.argmax(logits, dim=-1) transcription = processor.batch_decode(predicted_ids) return transcription - - + + # inference function for openvino def ov_infer(model, sample): output = model.output(0) @@ -191,18 +191,43 @@ For reference, see the same function provided for OpenVINO model. transcription = processor.batch_decode(torch.from_numpy(predicted_ids)) return transcription +Select inference device for OpenVINO + .. code:: ipython3 + import ipywidgets as widgets + core = ov.Core() + device = widgets.Dropdown( + options=core.available_devices + ["AUTO"], + value='AUTO', + description='Device:', + disabled=False, + ) + + device + + + + +.. 
parsed-literal:: + + Dropdown(description='Device:', index=4, options=('CPU', 'GPU.0', 'GPU.1', 'GPU.2', 'AUTO'), value='AUTO') + + +.. code:: ipython3 + + core = ov.Core() + pt_transcription = torch_infer(model, dataset[0]) - compiled_model = core.compile_model(ov_model) + compiled_model = core.compile_model(ov_model, device.value) ov_transcription = ov_infer(compiled_model, dataset[0]) .. code:: ipython3 import IPython.display as ipd - + print(f"[Reference]: {dataset[0]['text']}") print(f"[PyTorch]: {pt_transcription[0]}") print(f"[OpenVINO FP16]: {ov_transcription[0]}") @@ -220,7 +245,7 @@ For reference, see the same function provided for OpenVINO model. .. raw:: html - +
- diff --git a/docs/notebooks/236-stable-diffusion-v2-optimum-demo-comparison-with-output.rst b/docs/notebooks/236-stable-diffusion-v2-optimum-demo-comparison-with-output.rst deleted file mode 100644 index 565d091f251d6a..00000000000000 --- a/docs/notebooks/236-stable-diffusion-v2-optimum-demo-comparison-with-output.rst +++ /dev/null @@ -1,264 +0,0 @@ -Stable Diffusion v2.1 using Optimum-Intel OpenVINO and multiple Intel Hardware -============================================================================== - -This notebook will provide you a way to see different precision models -performing in different hardware. This notebook was done for showing -case the use of Optimum-Intel-OpenVINO and it is not optimized for -running multiple times. - -|image0| - -Table of contents: -^^^^^^^^^^^^^^^^^^ - -- `Showing Info Available Devices <#showing-info-available-devices>`__ -- `Using full precision model in CPU with - StableDiffusionPipeline <#using-full-precision-model-in-cpu-with-stablediffusionpipeline>`__ -- `Using full precision model in CPU with - OVStableDiffusionPipeline <#using-full-precision-model-in-cpu-with-ovstablediffusionpipeline>`__ -- `Using full precision model in dGPU with - OVStableDiffusionPipeline <#using-full-precision-model-in-dgpu-with-ovstablediffusionpipeline>`__ - -.. |image0| image:: https://github.com/openvinotoolkit/openvino_notebooks/assets/10940214/1858dae4-72fd-401e-b055-66d503d82446 - -Optimum Intel is the interface between the Transformers and Diffusers -libraries and the different tools and libraries provided by Intel to -accelerate end-to-end pipelines on Intel architectures. More details in -this -`repository `__. - -``Note: We suggest you to create a different environment and run the following installation command there.`` - -.. code:: ipython3 - - %pip install -q "optimum-intel[openvino,diffusers]@git+https://github.com/huggingface/optimum-intel.git" "ipywidgets" "transformers>=4.33.0" --extra-index-url https://download.pytorch.org/whl/cpu - -.. code:: ipython3 - - import warnings - warnings.filterwarnings('ignore') - -Showing Info Available Devices -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - - -The ``available_devices`` property shows the available devices in your -system. The “FULL_DEVICE_NAME” option to ``ie.get_property()`` shows the -name of the device. Check what is the ID name for the discrete GPU, if -you have integrated GPU (iGPU) and discrete GPU (dGPU), it will show -``device_name="GPU.0"`` for iGPU and ``device_name="GPU.1"`` for dGPU. -If you just have either an iGPU or dGPU that will be assigned to -``"GPU"`` - -Note: For more details about GPU with OpenVINO visit this -`link `__. -If you have been facing any issue in Ubuntu 20.04 or Windows 11 read -this -`blog `__. - -.. code:: ipython3 - - from openvino.runtime import Core - - ie = Core() - devices = ie.available_devices - - for device in devices: - device_name = ie.get_property(device, "FULL_DEVICE_NAME") - print(f"{device}: {device_name}") - - -.. parsed-literal:: - - CPU: Intel(R) Xeon(R) Gold 6348 CPU @ 2.60GHz - GPU: Intel(R) Data Center GPU Flex 170 (dGPU) - - -Using full precision model in CPU with ``StableDiffusionPipeline`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - - -.. 
code:: ipython3 - - from diffusers import StableDiffusionPipeline - - import gc - - model_id = "stabilityai/stable-diffusion-2-1-base" - pipe = StableDiffusionPipeline.from_pretrained(model_id) - pipe.save_pretrained("./stabilityai_cpu") - prompt = "red car in snowy forest" - output_cpu = pipe(prompt, num_inference_steps=17).images[0] - output_cpu.save("image_cpu.png") - output_cpu - - - - - -.. parsed-literal:: - - Fetching 13 files: 0%| | 0/13 [00:00`__ -- `Download Pre-Converted Stable Diffusion 2.1 - IR <#download-pre-converted-stable-diffusion-2-1-ir>`__ -- `Save the pre-trained models, Select the inference device and compile - it <#save-the-pre-trained-models-select-the-inference-device-and-compile-it>`__ -- `Be creative, add the prompt and enjoy the - result <#be-creative-add-the-prompt-and-enjoy-the-result>`__ +- `Configure Inference Pipeline <#configure-inference-pipeline>`__ +- `Using full precision model in choice device with + OVStableDiffusionPipeline <#using-full-precision-model-in-choice-device-with-ovstablediffusionpipeline>`__ .. |image0| image:: https://github.com/openvinotoolkit/openvino_notebooks/assets/10940214/1858dae4-72fd-401e-b055-66d503d82446 @@ -26,13 +28,7 @@ this .. code:: ipython3 - %pip install -q "optimum-intel[openvino,diffusers]@git+https://github.com/huggingface/optimum-intel.git" "ipywidgets" "transformers>=4.33" --extra-index-url https://download.pytorch.org/whl/cpu - - -.. parsed-literal:: - - Note: you may need to restart the kernel to use updated packages. - + %pip install -q "optimum-intel[openvino,diffusers]@git+https://github.com/huggingface/optimum-intel.git" "ipywidgets" "transformers>=4.33.0" --extra-index-url https://download.pytorch.org/whl/cpu Stable Diffusion pipeline should brings 6 elements together, a text encoder model with a tokenizer, a UNet model with and scheduler, and an @@ -44,16 +40,14 @@ Autoencoder with Decoder and Encoder models. image The base model used for this example is the -`stabilityai/stable-diffusion-2-1-base `__. -This model was converted to OpenVINO format, for accelerated inference -on CPU or Intel GPU with OpenVINO’s integration into Optimum: -``optimum-intel``. The model weights are stored with FP16 precision, -which reduces the size of the model by half. You can find the model used -in this notebook is -`helenai/stabilityai-stable-diffusion-2-1-base-ov `__. -Let’s download the pre-converted model Stable Diffusion 2.1 -`Intermediate Representation Format -(IR) `__ +stabilityai/stable-diffusion-2-1-base. This model was converted to +OpenVINO format, for accelerated inference on CPU or Intel GPU with +OpenVINO’s integration into Optimum. + +.. code:: ipython3 + + import warnings + warnings.filterwarnings('ignore') Showing Info Available Devices ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -68,19 +62,13 @@ you have integrated GPU (iGPU) and discrete GPU (dGPU), it will show If you just have either an iGPU or dGPU that will be assigned to ``"GPU"`` -Note: For more details about GPU with OpenVINO visit this -`link `__. -If you have been facing any issue in Ubuntu 20.04 or Windows 11 read -this -`blog `__. - .. code:: ipython3 from openvino.runtime import Core - + ie = Core() devices = ie.available_devices - + for device in devices: device_name = ie.get_property(device, "FULL_DEVICE_NAME") print(f"{device}: {device_name}") @@ -88,140 +76,65 @@ this .. 
parsed-literal:: - CPU: 13th Gen Intel(R) Core(TM) i9-13900K - GPU.0: Intel(R) UHD Graphics 770 (iGPU) - GPU.1: Intel(R) Arc(TM) A770 Graphics (dGPU) - - -Download Pre-Converted Stable Diffusion 2.1 IR -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - - -.. code:: ipython3 - - from optimum.intel.openvino import OVStableDiffusionPipeline - # download the pre-converted SD v2.1 model from Hugging Face Hub - name = "helenai/stabilityai-stable-diffusion-2-1-base-ov" - - pipe = OVStableDiffusionPipeline.from_pretrained(name, compile=False) - pipe.reshape(batch_size=1, height=512, width=512, num_images_per_prompt=1) - - -.. parsed-literal:: - - 2023-07-03 11:16:29.469651: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. - 2023-07-03 11:16:29.507431: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. - To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. - 2023-07-03 11:16:30.127586: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT - + CPU: Intel(R) Core(TM) Ultra 7 155H + GPU: Intel(R) Arc(TM) Graphics (iGPU) + NPU: Intel(R) AI Boost -.. parsed-literal:: - INFO:nncf:NNCF initialized successfully. Supported frameworks detected: torch, tensorflow, onnx, openvino +Configure Inference Pipeline +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. parsed-literal:: - No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda' - /home/ea/work/notebooks_convert/notebooks_conv_env/lib/python3.8/site-packages/diffusers/models/cross_attention.py:30: FutureWarning: Importing from cross_attention is deprecated. Please import from diffusers.models.attention_processor instead. - deprecate( - comet_ml is installed but `COMET_API_KEY` is not set. - The config attributes {'requires_safety_checker': False, 'safety_checker': [None, None]} were passed to OVStableDiffusionPipeline, but are not expected and will be ignored. Please verify your model_index.json configuration file. +Select device from dropdown list for running inference using OpenVINO. +.. code:: ipython3 + import ipywidgets as widgets + + device = widgets.Dropdown( + options=ie.available_devices + ["AUTO"], + value="CPU", + description="Device:", + disabled=False, + ) + + device -.. parsed-literal:: - Fetching 13 files: 0%| | 0/13 [00:00`__. +4. Run Stable Diffusion v2 Text-to-Image pipeline with OpenVINO. **Note:** This is the full version of the Stable Diffusion text-to-image implementation. 
If you would like to get started and run the notebook @@ -92,7 +94,15 @@ Table of contents: - `VAE <#vae>`__ - `Prepare Inference Pipeline <#prepare-inference-pipeline>`__ - `Configure Inference Pipeline <#configure-inference-pipeline>`__ - - `Run Text-to-Image generation <#run-text-to-image-generation>`__ + +- `Quantization <#quantization>`__ + + - `Prepare calibration dataset <#prepare-calibration-dataset>`__ + - `Run Hybrid Model Quantization <#run-hybrid-model-quantization>`__ + - `Compare inference time of the FP16 and INT8 + pipelines <#compare-inference-time-of-the-fp16-and-int8-pipelines>`__ + +- `Run Text-to-Image generation <#run-text-to-image-generation>`__ Prerequisites ------------- @@ -103,7 +113,14 @@ install required packages .. code:: ipython3 - %pip install -q "diffusers>=0.14.0" "openvino>=2023.1.0" "transformers>=4.25.1" gradio --extra-index-url https://download.pytorch.org/whl/cpu + %pip install -q "diffusers>=0.14.0" "openvino>=2023.1.0" "datasets>=2.14.6" "transformers>=4.25.1" gradio --extra-index-url https://download.pytorch.org/whl/cpu + %pip install -q "nncf>=2.9.0" + + +.. parsed-literal:: + + Note: you may need to restart the kernel to use updated packages. + Stable Diffusion v2 for Text-to-Image Generation ------------------------------------------------ @@ -138,9 +155,9 @@ using ``stable-diffusion-2-1``: .. code:: ipython3 from diffusers import StableDiffusionPipeline - + pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to("cpu") - + # for reducing memory consumption get all components from pipeline independently text_encoder = pipe.text_encoder text_encoder.eval() @@ -148,24 +165,16 @@ using ``stable-diffusion-2-1``: unet.eval() vae = pipe.vae vae.eval() - + conf = pipe.scheduler.config - + del pipe -.. parsed-literal:: - - 2023-08-29 22:18:10.107478: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. - 2023-08-29 22:18:10.146633: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. - To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. - 2023-08-29 22:18:10.895453: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT - - .. parsed-literal:: - Fetching 13 files: 0%| | 0/13 [00:00 x_t-1 latents = self.scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs)["prev_sample"].numpy() # scale and decode the image latents with vae image = self.vae_decoder(latents * (1 / 0.18215))[self._vae_d_output] - + image = self.postprocess_image(image, meta, output_type) return {"sample": image} - + def _encode_prompt(self, prompt:Union[str, List[str]], num_images_per_prompt:int = 1, do_classifier_free_guidance:bool = True, negative_prompt:Union[str, List[str]] = None): """ Encodes the prompt into text encoder hidden states. 
- + Parameters: prompt (str or list(str)): prompt to be encoded num_images_per_prompt (int): number of images that should be generated per prompt @@ -744,7 +754,7 @@ but there is some small difference in details: text_embeddings (np.ndarray): text encoder hidden states """ batch_size = len(prompt) if isinstance(prompt, list) else 1 - + # tokenize input prompts text_inputs = self.tokenizer( prompt, @@ -754,10 +764,10 @@ but there is some small difference in details: return_tensors="np", ) text_input_ids = text_inputs.input_ids - + text_embeddings = self.text_encoder( text_input_ids)[self._text_encoder_output] - + # duplicate text embeddings for each generation per prompt if num_images_per_prompt != 1: bs_embed, seq_len, _ = text_embeddings.shape @@ -765,7 +775,7 @@ but there is some small difference in details: text_embeddings, (1, num_images_per_prompt, 1)) text_embeddings = np.reshape( text_embeddings, (bs_embed * num_images_per_prompt, seq_len, -1)) - + # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] @@ -783,25 +793,25 @@ but there is some small difference in details: truncation=True, return_tensors="np", ) - + uncond_embeddings = self.text_encoder(uncond_input.input_ids)[self._text_encoder_output] - + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = np.tile(uncond_embeddings, (1, num_images_per_prompt, 1)) uncond_embeddings = np.reshape(uncond_embeddings, (batch_size * num_images_per_prompt, seq_len, -1)) - + # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = np.concatenate([uncond_embeddings, text_embeddings]) - + return text_embeddings - + def prepare_latents(self, image:PIL.Image.Image = None, latent_timestep:torch.Tensor = None): """ Function for getting initial latents for starting generation - + Parameters: image (PIL.Image.Image, *optional*, None): Input image for generation, if not provided randon noise will be used as starting point @@ -823,12 +833,12 @@ but there is some small difference in details: latents = latents * 0.18215 latents = self.scheduler.add_noise(torch.from_numpy(latents), torch.from_numpy(noise), latent_timestep).numpy() return latents, meta - + def postprocess_image(self, image:np.ndarray, meta:Dict, output_type:str = "pil"): """ - Postprocessing for decoded image. Takes generated image decoded by VAE decoder, unpad it to initila image size (if required), + Postprocessing for decoded image. Takes generated image decoded by VAE decoder, unpad it to initila image size (if required), normalize and convert to [0, 255] pixels range. Optionally, convertes it from np.ndarray to PIL.Image format - + Parameters: image (np.ndarray): Generated image @@ -862,26 +872,26 @@ but there is some small difference in details: image = [cv2.resize(img, (orig_width, orig_width)) for img in image] return image - + def get_timesteps(self, num_inference_steps:int, strength:float): """ Helper function for getting scheduler timesteps for generation In case of image-to-image generation, it updates number of steps according to strength - + Parameters: num_inference_steps (int): number of inference steps for generation strength (float): - value between 0.0 and 1.0, that controls the amount of noise that is added to the input image. 
+ value between 0.0 and 1.0, that controls the amount of noise that is added to the input image. Values that approach 1.0 allow for lots of variations but will also produce images that are not semantically consistent with the input. """ # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) - + t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start:] - - return timesteps, num_inference_steps - t_start + + return timesteps, num_inference_steps - t_start Configure Inference Pipeline ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -893,7 +903,7 @@ First, you should create instances of OpenVINO Model. .. code:: ipython3 import ipywidgets as widgets - + core = ov.Core() device = widgets.Dropdown( options=core.available_devices + ["AUTO"], @@ -901,7 +911,7 @@ First, you should create instances of OpenVINO Model. description='Device:', disabled=False, ) - + device @@ -909,14 +919,14 @@ First, you should create instances of OpenVINO Model. .. parsed-literal:: - Dropdown(description='Device:', index=2, options=('CPU', 'GNA', 'AUTO'), value='AUTO') + Dropdown(description='Device:', index=4, options=('CPU', 'GPU.0', 'GPU.1', 'GPU.2', 'AUTO'), value='AUTO') .. code:: ipython3 ov_config = {"INFERENCE_PRECISION_HINT": "f32"} if device.value != "CPU" else {} - + text_enc = core.compile_model(TEXT_ENCODER_OV_PATH, device.value) unet_model = core.compile_model(UNET_OV_PATH, device.value) vae_decoder = core.compile_model(VAE_DECODER_OV_PATH, device.value, ov_config) @@ -928,10 +938,10 @@ Let us define them and put all components together. .. code:: ipython3 from transformers import CLIPTokenizer - - scheduler = LMSDiscreteScheduler.from_config(conf) + + scheduler = DDIMScheduler.from_config(conf) # DDIMScheduler is used because UNet quantization produces better results with it tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-large-patch14') - + ov_pipe = OVStableDiffusionPipeline( tokenizer=tokenizer, text_encoder=text_enc, @@ -941,8 +951,317 @@ Let us define them and put all components together. scheduler=scheduler ) +Quantization +------------ + + + +`NNCF `__ enables +post-training quantization by adding quantization layers into model +graph and then using a subset of the training dataset to initialize the +parameters of these additional quantization layers. Quantized operations +are executed in ``INT8`` instead of ``FP32``/``FP16`` making model +inference faster. + +According to ``Stable Diffusion v2`` structure, the UNet model takes up +significant portion of the overall pipeline execution time. Now we will +show you how to optimize the UNet part using +`NNCF `__ to reduce +computation cost and speed up the pipeline. Quantizing the rest of the +pipeline does not significantly improve inference performance but can +lead to a substantial degradation of accuracy. + +For this model we apply quantization in hybrid mode which means that we +quantize: (1) weights of MatMul and Embedding layers and (2) activations +of other layers. The steps are the following: + +1. Create a calibration dataset for quantization. +2. Collect operations with weights. +3. Run ``nncf.compress_model()`` to compress only the model weights. +4. Run ``nncf.quantize()`` on the compressed model with weighted + operations ignored by providing ``ignored_scope`` parameter. +5. Save the ``INT8`` model using ``openvino.save_model()`` function. 
+ +Please select below whether you would like to run quantization to +improve model inference speed. + + **NOTE**: Quantization is time and memory consuming operation. + Running quantization code below may take some time. + +.. code:: ipython3 + + to_quantize = widgets.Checkbox( + value=True, + description='Quantization', + disabled=False, + ) + + to_quantize + + + + +.. parsed-literal:: + + Checkbox(value=True, description='Quantization') + + + +.. code:: ipython3 + + import sys + sys.path.append("../utils") + + int8_ov_pipe = None + + %load_ext skip_kernel_extension + +Prepare calibration dataset +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + + +We use a portion of +`conceptual_captions `__ +dataset from Hugging Face as calibration data. To collect intermediate +model inputs for calibration we should customize ``CompiledModel``. + +.. code:: ipython3 + + %%skip not $to_quantize.value + + import datasets + import numpy as np + from tqdm.notebook import tqdm + from typing import Any, Dict, List + + + def disable_progress_bar(pipeline, disable=True): + if not hasattr(pipeline, "_progress_bar_config"): + pipeline._progress_bar_config = {'disable': disable} + else: + pipeline._progress_bar_config['disable'] = disable + + + class CompiledModelDecorator(ov.CompiledModel): + def __init__(self, compiled_model: ov.CompiledModel, data_cache: List[Any] = None, keep_prob: float = 0.5): + super().__init__(compiled_model) + self.data_cache = data_cache if data_cache is not None else [] + self.keep_prob = keep_prob + + def __call__(self, *args, **kwargs): + if np.random.rand() <= self.keep_prob: + self.data_cache.append(*args) + return super().__call__(*args, **kwargs) + + + def collect_calibration_data(ov_pipe, calibration_dataset_size: int, num_inference_steps: int) -> List[Dict]: + original_unet = ov_pipe.unet + calibration_data = [] + ov_pipe.unet = CompiledModelDecorator(original_unet, calibration_data, keep_prob=0.7) + disable_progress_bar(ov_pipe) + + dataset = datasets.load_dataset("conceptual_captions", split="train").shuffle(seed=42) + + # Run inference for data collection + pbar = tqdm(total=calibration_dataset_size) + for batch in dataset: + prompt = batch["caption"] + if len(prompt) > ov_pipe.tokenizer.model_max_length: + continue + ov_pipe(prompt, num_inference_steps=num_inference_steps, seed=1) + pbar.update(len(calibration_data) - pbar.n) + if pbar.n >= calibration_dataset_size: + break + + disable_progress_bar(ov_pipe, disable=False) + ov_pipe.unet = original_unet + return calibration_data + +Run Hybrid Model Quantization +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + + +.. 
code:: ipython3 + + %%skip not $to_quantize.value + + from collections import deque + from transformers import set_seed + import nncf + + def get_operation_const_op(operation, const_port_id: int): + node = operation.input_value(const_port_id).get_node() + queue = deque([node]) + constant_node = None + allowed_propagation_types_list = ["Convert", "FakeQuantize", "Reshape"] + + while len(queue) != 0: + curr_node = queue.popleft() + if curr_node.get_type_name() == "Constant": + constant_node = curr_node + break + if len(curr_node.inputs()) == 0: + break + if curr_node.get_type_name() in allowed_propagation_types_list: + queue.append(curr_node.input_value(0).get_node()) + + return constant_node + + + def is_embedding(node) -> bool: + allowed_types_list = ["f16", "f32", "f64"] + const_port_id = 0 + input_tensor = node.input_value(const_port_id) + if input_tensor.get_element_type().get_type_name() in allowed_types_list: + const_node = get_operation_const_op(node, const_port_id) + if const_node is not None: + return True + + return False + + + def collect_ops_with_weights(model): + ops_with_weights = [] + for op in model.get_ops(): + if op.get_type_name() == "MatMul": + constant_node_0 = get_operation_const_op(op, const_port_id=0) + constant_node_1 = get_operation_const_op(op, const_port_id=1) + if constant_node_0 or constant_node_1: + ops_with_weights.append(op.get_friendly_name()) + if op.get_type_name() == "Gather" and is_embedding(op): + ops_with_weights.append(op.get_friendly_name()) + + return ops_with_weights + + UNET_INT8_OV_PATH = sd2_1_model_dir / 'unet_optimized.xml' + if not UNET_INT8_OV_PATH.exists(): + calibration_dataset_size = 300 + set_seed(1) + unet_calibration_data = collect_calibration_data(ov_pipe, + calibration_dataset_size=calibration_dataset_size, + num_inference_steps=50) + + unet = core.read_model(UNET_OV_PATH) + + # Collect operations which weights will be compressed + unet_ignored_scope = collect_ops_with_weights(unet) + + # Compress model weights + compressed_unet = nncf.compress_weights(unet, ignored_scope=nncf.IgnoredScope(types=['Convolution'])) + + # Quantize both weights and activations of Convolution layers + quantized_unet = nncf.quantize( + model=compressed_unet, + calibration_dataset=nncf.Dataset(unet_calibration_data), + subset_size=calibration_dataset_size, + model_type=nncf.ModelType.TRANSFORMER, + ignored_scope=nncf.IgnoredScope(names=unet_ignored_scope), + advanced_parameters=nncf.AdvancedQuantizationParameters(smooth_quant_alpha=-1) + ) + + ov.save_model(quantized_unet, UNET_INT8_OV_PATH) + + +.. parsed-literal:: + + INFO:nncf:NNCF initialized successfully. Supported frameworks detected: torch, onnx, openvino + + +.. code:: ipython3 + + %%skip not $to_quantize.value + + int8_unet_model = core.compile_model(UNET_INT8_OV_PATH, device.value) + int8_ov_pipe = OVStableDiffusionPipeline( + tokenizer=tokenizer, + text_encoder=text_enc, + unet=int8_unet_model, + vae_encoder=vae_encoder, + vae_decoder=vae_decoder, + scheduler=scheduler + ) + +Compare UNet file size +~~~~~~~~~~~~~~~~~~~~~~ + +.. code:: ipython3 + + %%skip not $to_quantize.value + + fp16_ir_model_size = UNET_OV_PATH.with_suffix(".bin").stat().st_size / 1024 + quantized_model_size = UNET_INT8_OV_PATH.with_suffix(".bin").stat().st_size / 1024 + + print(f"FP16 model size: {fp16_ir_model_size:.2f} KB") + print(f"INT8 model size: {quantized_model_size:.2f} KB") + print(f"Model compression rate: {fp16_ir_model_size / quantized_model_size:.3f}") + + +.. 
parsed-literal:: + + FP16 model size: 1691232.51 KB + INT8 model size: 846918.58 KB + Model compression rate: 1.997 + + +Compare inference time of the FP16 and INT8 pipelines +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + + +To measure the inference performance of the ``FP16`` and ``INT8`` +pipelines, we use median inference time on calibration subset. + + **NOTE**: For the most accurate performance estimation, it is + recommended to run ``benchmark_app`` in a terminal/command prompt + after closing other applications. + +.. code:: ipython3 + + %%skip not $to_quantize.value + + import time + + def calculate_inference_time(pipeline, validation_data): + inference_time = [] + pipeline.set_progress_bar_config(disable=True) + for prompt in validation_data: + start = time.perf_counter() + _ = pipeline(prompt, num_inference_steps=10, seed=0) + end = time.perf_counter() + delta = end - start + inference_time.append(delta) + return np.median(inference_time) + +.. code:: ipython3 + + %%skip not $to_quantize.value + + validation_size = 10 + validation_dataset = datasets.load_dataset("conceptual_captions", split="train", streaming=True).take(validation_size) + validation_data = [batch["caption"] for batch in validation_dataset] + + fp_latency = calculate_inference_time(ov_pipe, validation_data) + int8_latency = calculate_inference_time(int8_ov_pipe, validation_data) + print(f"Performance speed-up: {fp_latency / int8_latency:.3f}") + + +.. parsed-literal:: + + /home/nsavel/venvs/ov_notebooks_tmp/lib/python3.8/site-packages/datasets/load.py:1429: FutureWarning: The repository for conceptual_captions contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/conceptual_captions + You can avoid this message in future by passing the argument `trust_remote_code=True`. + Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( + + +.. parsed-literal:: + + Performance speed-up: 1.232 + + Run Text-to-Image generation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +---------------------------- @@ -950,24 +1269,51 @@ Now, you can define a text prompts for image generation and run inference pipeline. Optionally, you can also change the random generator seed for latent state initialization and number of steps. - **NOTE**: Consider increasing ``steps`` to get more precise results. + **Note**: Consider increasing ``steps`` to get more precise results. A suggested value is ``50``, but it will take longer time to process. +Please select below whether you would like to use the quantized model to +launch the interactive demo. + .. code:: ipython3 - import gradio as gr + quantized_model_present = int8_ov_pipe is not None + + use_quantized_model = widgets.Checkbox( + value=True if quantized_model_present else False, + description='Use quantized model', + disabled=not quantized_model_present, + ) + + use_quantized_model + + + + +.. parsed-literal:: + Checkbox(value=True, description='Use quantized model') + + +.. 
code:: ipython3 + + import gradio as gr + + + pipeline = int8_ov_pipe if use_quantized_model.value else ov_pipe + + def generate(prompt, negative_prompt, seed, num_steps, _=gr.Progress(track_tqdm=True)): - result = ov_pipe( + result = pipeline( prompt, negative_prompt=negative_prompt, num_inference_steps=num_steps, seed=seed, ) return result["sample"][0] - - + + gr.close_all() demo = gr.Interface( generate, @@ -985,22 +1331,8 @@ seed for latent state initialization and number of steps. ], "image", ) - + try: demo.queue().launch() except Exception: demo.queue().launch(share=True) - - -.. parsed-literal:: - - Running on local URL: http://127.0.0.1:7861 - - To create a public link, set `share=True` in `launch()`. - - - -.. .. raw:: html - -..
- diff --git a/docs/notebooks/237-segment-anything-with-output.rst b/docs/notebooks/237-segment-anything-with-output.rst index b8c679e3fa4834..6b1d7eb8121544 100644 --- a/docs/notebooks/237-segment-anything-with-output.rst +++ b/docs/notebooks/237-segment-anything-with-output.rst @@ -141,7 +141,7 @@ Prerequisites .. code:: ipython3 - %pip install -q "segment_anything" "gradio>=4.13" "openvino>=2023.1.0" "nncf>=2.5.0" "torch>=2.1" "torchvision>=0.16" --extra-index-url https://download.pytorch.org/whl/cpu + %pip install -q "segment_anything" "gradio>=4.13" "openvino>=2023.1.0" "nncf>=2.7.0" "torch>=2.1" "torchvision>=0.16" --extra-index-url https://download.pytorch.org/whl/cpu Convert model to OpenVINO Intermediate Representation ----------------------------------------------------- @@ -164,14 +164,14 @@ model type below to a SAM model checkpoint, then load the model using .. code:: ipython3 import sys - + sys.path.append("../utils") from notebook_utils import download_file - + checkpoint = "sam_vit_b_01ec64.pth" model_url = "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth" model_type = "vit_b" - + download_file(model_url) @@ -191,7 +191,7 @@ model type below to a SAM model checkpoint, then load the model using .. code:: ipython3 from segment_anything import sam_model_registry - + sam = sam_model_registry[model_type](checkpoint=checkpoint) As we already discussed, Image Encoder part can be used once per image, @@ -216,15 +216,15 @@ embeddings, tensor with shape ``1x256x64x64`` from pathlib import Path import torch import openvino as ov - + core = ov.Core() - + ov_encoder_path = Path("sam_image_encoder.xml") if not ov_encoder_path.exists(): with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=torch.jit.TracerWarning) warnings.filterwarnings("ignore", category=UserWarning) - + ov_encoder_model = ov.convert_model(sam.image_encoder, example_input=torch.zeros(1,3,1024,1024), input=([1,3,1024,1024],)) ov.save_model(ov_encoder_model, ov_encoder_path) else: @@ -233,14 +233,14 @@ embeddings, tensor with shape ``1x256x64x64`` .. code:: ipython3 import ipywidgets as widgets - + device = widgets.Dropdown( options=core.available_devices + ["AUTO"], value='AUTO', description='Device:', disabled=False, ) - + device @@ -294,7 +294,7 @@ Model outputs: .. 
code:: ipython3 from typing import Tuple - + class SamExportableModel(torch.nn.Module): def __init__( self, @@ -311,29 +311,29 @@ Model outputs: self.use_stability_score = use_stability_score self.stability_score_offset = 1.0 self.return_extra_metrics = return_extra_metrics - + def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor: point_coords = point_coords + 0.5 point_coords = point_coords / self.img_size point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords) point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding) - + point_embedding = point_embedding * (point_labels != -1).to(torch.float32) point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * ( point_labels == -1 ).to(torch.float32) - + for i in range(self.model.prompt_encoder.num_point_embeddings): point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[ i ].weight * (point_labels == i).to(torch.float32) - + return point_embedding - + def t_embed_masks(self, input_mask: torch.Tensor) -> torch.Tensor: mask_embedding = self.model.prompt_encoder.mask_downscaling(input_mask) return mask_embedding - + def mask_postprocessing(self, masks: torch.Tensor) -> torch.Tensor: masks = torch.nn.functional.interpolate( masks, @@ -342,7 +342,7 @@ Model outputs: align_corners=False, ) return masks - + def select_masks( self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int ) -> Tuple[torch.Tensor, torch.Tensor]: @@ -355,9 +355,9 @@ Model outputs: best_idx = torch.argmax(score, dim=1) masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1) iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1) - + return masks, iou_preds - + @torch.no_grad() def forward( self, @@ -373,33 +373,33 @@ Model outputs: ) else: dense_embedding = self._embed_masks(mask_input) - + masks, scores = self.model.mask_decoder.predict_masks( image_embeddings=image_embeddings, image_pe=self.model.prompt_encoder.get_dense_pe(), sparse_prompt_embeddings=sparse_embedding, dense_prompt_embeddings=dense_embedding, ) - + if self.use_stability_score: scores = calculate_stability_score( masks, self.model.mask_threshold, self.stability_score_offset ) - + if self.return_single_mask: masks, scores = self.select_masks(masks, scores, point_coords.shape[1]) - + upscaled_masks = self.mask_postprocessing(masks) - + if self.return_extra_metrics: stability_scores = calculate_stability_score( upscaled_masks, self.model.mask_threshold, self.stability_score_offset ) areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1) return upscaled_masks, scores, stability_scores, areas, masks - + return upscaled_masks, scores - + ov_model_path = Path("sam_mask_predictor.xml") if not ov_model_path.exists(): exportable_model = SamExportableModel(sam, return_single_mask=True) @@ -450,7 +450,7 @@ Example Image import numpy as np import cv2 import matplotlib.pyplot as plt - + download_file("https://raw.githubusercontent.com/facebookresearch/segment-anything/main/notebooks/images/truck.jpg") image = cv2.imread('truck.jpg') image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) @@ -495,25 +495,25 @@ These steps are applicable to all available models from copy import deepcopy from typing import Tuple - from torchvision.transforms.functional import resize, to_pil_image - + from torchvision.transforms.functional import resize, to_pil_image + class ResizeLongestSide: """ Resizes images to longest side 'target_length', as well as provides 
methods for resizing coordinates and boxes. Provides methods for transforming numpy arrays. """ - + def __init__(self, target_length: int) -> None: self.target_length = target_length - + def apply_image(self, image: np.ndarray) -> np.ndarray: """ Expects a numpy array with shape HxWxC in uint8 format. """ target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length) return np.array(resize(to_pil_image(image), target_size)) - + def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray: """ Expects a numpy array of length 2 in the final dimension. Requires the @@ -527,7 +527,7 @@ These steps are applicable to all available models coords[..., 0] = coords[..., 0] * (new_w / old_w) coords[..., 1] = coords[..., 1] * (new_h / old_h) return coords - + def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray: """ Expects a numpy array shape Bx4. Requires the original image size @@ -535,7 +535,7 @@ These steps are applicable to all available models """ boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size) return boxes.reshape(-1, 4) - + @staticmethod def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]: """ @@ -546,24 +546,24 @@ These steps are applicable to all available models neww = int(neww + 0.5) newh = int(newh + 0.5) return (newh, neww) - - + + resizer = ResizeLongestSide(1024) - - + + def preprocess_image(image: np.ndarray): resized_image = resizer.apply_image(image) resized_image = (resized_image.astype(np.float32) - [123.675, 116.28, 103.53]) / [58.395, 57.12, 57.375] resized_image = np.expand_dims(np.transpose(resized_image, (2, 0, 1)).astype(np.float32), 0) - + # Pad h, w = resized_image.shape[-2:] padh = 1024 - h padw = 1024 - w x = np.pad(resized_image, ((0, 0), (0, 0), (0, padh), (0, padw))) return x - - + + def postprocess_masks(masks: np.ndarray, orig_size): size_before_pad = resizer.get_preprocess_shape(orig_size[0], orig_size[1], masks.shape[-1]) masks = masks[..., :int(size_before_pad[0]), :int(size_before_pad[1])] @@ -577,19 +577,19 @@ These steps are applicable to all available models h, w = mask.shape[-2:] mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1) ax.imshow(mask_image) - - + + def show_points(coords, labels, ax, marker_size=375): pos_points = coords[labels == 1] neg_points = coords[labels == 0] ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25) - ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25) - - + ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25) + + def show_box(box, ax): x0, y0 = box[0], box[1] w, h = box[2] - box[0], box[3] - box[1] - ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0, 0, 0, 0), lw=2)) + ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0, 0, 0, 0), lw=2)) Image encoding ~~~~~~~~~~~~~~ @@ -605,7 +605,7 @@ reuse them. preprocessed_image = preprocess_image(image) encoding_results = ov_encoder(preprocessed_image) - + image_embeddings = encoding_results[ov_encoder.output(0)] Now, we can try to provide different prompts for mask generation @@ -622,12 +622,12 @@ location on the image below. 
input_point = np.array([[500, 375]]) input_label = np.array([1]) - + plt.figure(figsize=(10,10)) plt.imshow(image) show_points(input_point, input_label, plt.gca()) plt.axis('off') - plt.show() + plt.show() @@ -659,7 +659,7 @@ object). .. code:: ipython3 results = ov_predictor(inputs) - + masks = results[ov_predictor.output(0)] masks = postprocess_masks(masks, image.shape[:-1]) masks = masks > 0.0 @@ -671,7 +671,7 @@ object). show_mask(masks, plt.gca()) show_points(input_point, input_label, plt.gca()) plt.axis('off') - plt.show() + plt.show() @@ -699,7 +699,7 @@ Now, prompt for model looks like represented on this image: plt.imshow(image) show_points(input_point, input_label, plt.gca()) plt.axis('off') - plt.show() + plt.show() @@ -712,7 +712,7 @@ Transform the points as in the previous example. coord = np.concatenate([input_point, np.array([[0.0, 0.0]])], axis=0)[None, :, :] label = np.concatenate([input_label, np.array([-1])], axis=0)[None, :].astype(np.float32) - + coord = resizer.apply_coords(coord, image.shape[:2]).astype(np.float32) Package inputs, then predict and threshold the mask. @@ -724,9 +724,9 @@ Package inputs, then predict and threshold the mask. "point_coords": coord, "point_labels": label, } - + results = ov_predictor(inputs) - + masks = results[ov_predictor.output(0)] masks = postprocess_masks(masks, image.shape[:-1]) masks = masks > 0.0 @@ -738,7 +738,7 @@ Package inputs, then predict and threshold the mask. show_mask(masks, plt.gca()) show_points(input_point, input_label, plt.gca()) plt.axis('off') - plt.show() + plt.show() @@ -785,10 +785,10 @@ padding point since the input includes a box input. box_coords = input_box.reshape(2, 2) box_labels = np.array([2,3]) - + coord = np.concatenate([input_point, box_coords], axis=0)[None, :, :] label = np.concatenate([input_label, box_labels], axis=0)[None, :].astype(np.float32) - + coord = resizer.apply_coords(coord, image.shape[:2]).astype(np.float32) Package inputs, then predict and threshold the mask. @@ -800,9 +800,9 @@ Package inputs, then predict and threshold the mask. "point_coords": coord, "point_labels": label, } - + results = ov_predictor(inputs) - + masks = results[ov_predictor.output(0)] masks = postprocess_masks(masks, image.shape[:-1]) masks = masks > 0.0 @@ -834,13 +834,13 @@ point. .. code:: ipython3 import gradio as gr - + class Segmenter: def __init__(self, ov_encoder, ov_predictor): self.encoder = ov_encoder self.predictor = ov_predictor self._img_embeddings = None - + def set_image(self, img:np.ndarray): if self._img_embeddings is not None: del self._img_embeddings @@ -849,7 +849,7 @@ point. image_embeddings = encoding_results[ov_encoder.output(0)] self._img_embeddings = image_embeddings return img - + def get_mask(self, points, img): coord = np.array(points) coord = np.concatenate([coord, np.array([[0,0]])], axis=0) @@ -863,28 +863,28 @@ point. 
"point_coords": coord, "point_labels": label, } - + results = self.predictor(inputs) masks = results[ov_predictor.output(0)] masks = postprocess_masks(masks, img.shape[:-1]) - + masks = masks > 0.0 mask = masks[0] mask = np.transpose(mask, (1, 2, 0)) return mask - + segmenter = Segmenter(ov_encoder, ov_predictor) - - + + with gr.Blocks() as demo: with gr.Row(): input_img = gr.Image(label="Input", type="numpy", height=480, width=480) output_img = gr.Image(label="Selected Segment", type="numpy", height=480, width=480) - + def on_image_change(img): segmenter.set_image(img) return img - + def get_select_coords(img, evt: gr.SelectData): pixels_in_queue = set() h, w = img.shape[:2] @@ -900,10 +900,10 @@ point. out = cv2.addWeighted(out.astype(np.float32), 0.7, mask_image.astype(np.float32), 0.3, 0.0) out = out.astype(np.uint8) return out - + input_img.select(get_select_coords, [input_img], output_img) input_img.upload(on_image_change, [input_img], [input_img]) - + if __name__ == "__main__": try: demo.launch() @@ -914,14 +914,14 @@ point. .. parsed-literal:: Running on local URL: http://127.0.0.1:7860 - + To create a public link, set `share=True` in `launch()`. -.. .. raw:: html -..
+ + Run OpenVINO model in automatic mask generation mode @@ -942,15 +942,15 @@ postprocessing masks to remove small disconnected regions and holes. .. code:: ipython3 from segment_anything.utils.amg import ( - MaskData, - generate_crop_boxes, - uncrop_boxes_xyxy, - uncrop_masks, - uncrop_points, - calculate_stability_score, - rle_to_mask, - batched_mask_to_box, - mask_to_rle_pytorch, + MaskData, + generate_crop_boxes, + uncrop_boxes_xyxy, + uncrop_masks, + uncrop_points, + calculate_stability_score, + rle_to_mask, + batched_mask_to_box, + mask_to_rle_pytorch, is_box_near_crop_edge, batch_iterator, remove_small_regions, @@ -975,12 +975,12 @@ postprocessing masks to remove small disconnected regions and holes. stability_score_thresh ) -> MaskData: orig_h, orig_w = orig_size - + # Run model on this batch transformed_points = resizer.apply_coords(points, im_size) in_points = transformed_points in_labels = np.ones(in_points.shape[0], dtype=int) - + inputs = { "image_embeddings": image_embedding, "point_coords": in_points[:, None, :], @@ -990,7 +990,7 @@ postprocessing masks to remove small disconnected regions and holes. masks = postprocess_masks(res[ov_predictor.output(0)], orig_size) masks = torch.from_numpy(masks) iou_preds = torch.from_numpy(res[ov_predictor.output(1)]) - + # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), @@ -998,12 +998,12 @@ postprocessing masks to remove small disconnected regions and holes. points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks - + # Filter by predicted IoU if iou_thresh > 0.0: keep_mask = data["iou_preds"] > iou_thresh data.filter(keep_mask) - + # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], mask_threshold, stability_score_offset @@ -1011,21 +1011,21 @@ postprocessing masks to remove small disconnected regions and holes. if stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= stability_score_thresh data.filter(keep_mask) - + # Threshold masks and calculate boxes data["masks"] = data["masks"] > mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) - + # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): data.filter(keep_mask) - + # Compress to RLE data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w) data["rles"] = mask_to_rle_pytorch(data["masks"]) del data["masks"] - + return data .. code:: ipython3 @@ -1049,18 +1049,18 @@ postprocessing masks to remove small disconnected regions and holes. cropped_im_size = cropped_im.shape[:2] preprocessed_cropped_im = preprocess_image(cropped_im) crop_embeddings = ov_encoder(preprocessed_cropped_im)[ov_encoder.output(0)] - + # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = point_grids[crop_layer_idx] * points_scale - + # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(points_per_batch, points_for_image): batch_data = process_batch(crop_embeddings, points, cropped_im_size, crop_box, orig_size, pred_iou_thresh, mask_threshold, stability_score_offset, stability_score_thresh) data.cat(batch_data) del batch_data - + # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), @@ -1069,12 +1069,12 @@ postprocessing masks to remove small disconnected regions and holes. 
iou_threshold=box_nms_thresh, ) data.filter(keep_by_nms) - + # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) - + return data .. code:: ipython3 @@ -1084,13 +1084,13 @@ postprocessing masks to remove small disconnected regions and holes. crop_boxes, layer_idxs = generate_crop_boxes( orig_size, crop_n_layers, crop_overlap_ratio ) - + # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = process_crop(image, point_grids, crop_box, layer_idx, orig_size) data.cat(crop_data) - + # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops @@ -1103,7 +1103,7 @@ postprocessing masks to remove small disconnected regions and holes. iou_threshold=crop_nms_thresh, ) data.filter(keep_by_nms) - + data.to_numpy() return data @@ -1113,30 +1113,30 @@ postprocessing masks to remove small disconnected regions and holes. """ Removes small disconnected regions and holes in masks, then reruns box NMS to remove any new duplicates. - + Edits mask_data in place. - + Requires open-cv as a dependency. """ if len(mask_data["rles"]) == 0: return mask_data - + # Filter small disconnected regions and holes new_masks = [] scores = [] for rle in mask_data["rles"]: mask = rle_to_mask(rle) - + mask, changed = remove_small_regions(mask, min_area, mode="holes") unchanged = not changed mask, changed = remove_small_regions(mask, min_area, mode="islands") unchanged = unchanged and not changed - + new_masks.append(torch.as_tensor(mask).unsqueeze(0)) # Give score=0 to changed masks and score=1 to unchanged masks # so NMS will prefer ones that didn't need postprocessing scores.append(float(unchanged)) - + # Recalculate boxes and remove any new duplicates masks = torch.cat(new_masks, dim=0) boxes = batched_mask_to_box(masks) @@ -1146,7 +1146,7 @@ postprocessing masks to remove small disconnected regions and holes. torch.zeros(len(boxes)), # categories iou_threshold=nms_thresh, ) - + # Only recalculate RLEs for masks that have changed for i_mask in keep_by_nms: if scores[i_mask] == 0.0: @@ -1155,7 +1155,7 @@ postprocessing masks to remove small disconnected regions and holes. # update res directly mask_data["boxes"][i_mask] = boxes[i_mask] mask_data.filter(keep_by_nms) - + return mask_data There are several tunable parameters in automatic mask generation that @@ -1171,10 +1171,10 @@ smaller objects, and post-processing can remove stray pixels and holes ) -> List[Dict[str, Any]]: """ Generates masks for the given image. - + Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. - + Returns: list(dict(str, any)): A list over records for masks. 
Each record is a dict containing the following keys: @@ -1199,7 +1199,7 @@ smaller objects, and post-processing can remove stray pixels and holes ) mask_data = generate_masks( image, point_grids, crop_n_layers, crop_overlap_ratio, crop_nms_thresh) - + # Filter small disconnected regions and holes in masks if min_mask_region_area > 0: mask_data = postprocess_small_regions( @@ -1207,10 +1207,10 @@ smaller objects, and post-processing can remove stray pixels and holes min_mask_region_area, max(box_nms_thresh, crop_nms_thresh), ) - + mask_data["segmentations"] = [ rle_to_mask(rle) for rle in mask_data["rles"]] - + # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): @@ -1224,7 +1224,7 @@ smaller objects, and post-processing can remove stray pixels and holes "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) - + return curr_anns .. code:: ipython3 @@ -1259,7 +1259,7 @@ is a dictionary containing various data about the mask. These keys are: .. code:: ipython3 from tqdm.notebook import tqdm - + def draw_anns(image, anns): if len(anns) == 0: return @@ -1274,10 +1274,10 @@ is a dictionary containing various data about the mask. These keys are: .. code:: ipython3 import PIL - + out = draw_anns(image, prediction) cv2.imwrite("result.png", out[:, :, ::-1]) - + PIL.Image.open("result.png") @@ -1325,12 +1325,12 @@ the label files. .. code:: ipython3 from zipfile import ZipFile - + DATA_URL = "https://ultralytics.com/assets/coco128.zip" OUT_DIR = Path('.') - + download_file(DATA_URL, directory=OUT_DIR, show_progress=True) - + if not (OUT_DIR / "coco128/images/train2017").exists(): with ZipFile('coco128.zip' , "r") as zip_ref: zip_ref.extractall(OUT_DIR) @@ -1348,20 +1348,20 @@ calibration dataset. For PyTorch, we can pass an instance of the .. code:: ipython3 import torch.utils.data as data - + class COCOLoader(data.Dataset): def __init__(self, images_path): self.images = list(Path(images_path).iterdir()) - + def __getitem__(self, index): image_path = self.images[index] image = cv2.imread(str(image_path)) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) return image - + def __len__(self): return len(self.images) - + coco_dataset = COCOLoader(OUT_DIR / 'coco128/images/train2017') calibration_loader = torch.utils.data.DataLoader(coco_dataset) @@ -1371,7 +1371,7 @@ dataset and returns data that can be passed to the model for inference. .. code:: ipython3 import nncf - + def transform_fn(image_data): """ Quantization transform function. Extracts and preprocess input data from dataloader item for quantization. @@ -1383,7 +1383,7 @@ dataset and returns data that can be passed to the model for inference. image = image_data.numpy() processed_image = preprocess_image(np.squeeze(image)) return processed_image - + calibration_dataset = nncf.Dataset(calibration_loader, transform_fn) @@ -1413,17 +1413,17 @@ result, we will use a ``mixed`` quantization preset. It provides symmetric quantization of weights and asymmetric quantization of activations. - **NOTE**: Model post-training quantization is time-consuming process. + **Note**: Model post-training quantization is time-consuming process. Be patient, it can take several minutes depending on your hardware. .. 
code:: ipython3 - + model = core.read_model(ov_encoder_path) quantized_model = nncf.quantize(model, calibration_dataset, model_type=nncf.parameters.ModelType.TRANSFORMER, - preset=nncf.common.quantization.structs.QuantizationPreset.MIXED, subset_size=128) + subset_size=128) print("model quantization finished") @@ -1453,6 +1453,10 @@ activations. + + + + .. code:: ipython3 ov_encoder_path_int8 = "sam_image_encoder_int8.xml" @@ -1472,12 +1476,12 @@ We can reuse the previous code to validate the output of ``INT8`` model. ov_encoder_int8 = core.compile_model(ov_encoder_model_int8, device.value) encoding_results = ov_encoder_int8(preprocessed_image) image_embeddings = encoding_results[ov_encoder_int8.output(0)] - + input_point = np.array([[500, 375]]) input_label = np.array([1]) coord = np.concatenate([input_point, np.array([[0.0, 0.0]])], axis=0)[None, :, :] label = np.concatenate([input_label, np.array([-1])], axis=0)[None, :].astype(np.float32) - + coord = resizer.apply_coords(coord, image.shape[:2]).astype(np.float32) inputs = { "image_embeddings": image_embeddings, @@ -1485,7 +1489,7 @@ We can reuse the previous code to validate the output of ``INT8`` model. "point_labels": label, } results = ov_predictor(inputs) - + masks = results[ov_predictor.output(0)] masks = postprocess_masks(masks, image.shape[:-1]) masks = masks > 0.0 @@ -1494,7 +1498,7 @@ We can reuse the previous code to validate the output of ``INT8`` model. show_mask(masks, plt.gca()) show_points(input_point, input_label, plt.gca()) plt.axis('off') - plt.show() + plt.show() @@ -1547,12 +1551,12 @@ models. [ WARNING ] Default duration 120 seconds is used for unknown device AUTO [ INFO ] OpenVINO: [ INFO ] Build ................................. 2023.1.0-12050-e33de350633 - [ INFO ] + [ INFO ] [ INFO ] Device info: [ INFO ] AUTO [ INFO ] Build ................................. 2023.1.0-12050-e33de350633 - [ INFO ] - [ INFO ] + [ INFO ] + [ INFO ] [Step 3/11] Setting device configuration [ WARNING ] Performance hint was not explicitly specified in command line. Device(AUTO) performance hint will be set to PerformanceMode.THROUGHPUT. [Step 4/11] Reading model files @@ -1600,7 +1604,7 @@ models. [ INFO ] LOADED_FROM_CACHE: False [Step 9/11] Creating infer requests and preparing input tensors [ WARNING ] No input files were given for input 'x'!. This input will be filled with random values! - [ INFO ] Fill input 'x' with random values + [ INFO ] Fill input 'x' with random values [Step 10/11] Measuring performance (Start inference asynchronously, 12 inference requests, limits: 120000 ms duration) [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). [ INFO ] First inference took 3347.39 ms @@ -1630,12 +1634,12 @@ models. [ WARNING ] Default duration 120 seconds is used for unknown device AUTO [ INFO ] OpenVINO: [ INFO ] Build ................................. 2023.1.0-12050-e33de350633 - [ INFO ] + [ INFO ] [ INFO ] Device info: [ INFO ] AUTO [ INFO ] Build ................................. 2023.1.0-12050-e33de350633 - [ INFO ] - [ INFO ] + [ INFO ] + [ INFO ] [Step 3/11] Setting device configuration [ WARNING ] Performance hint was not explicitly specified in command line. Device(AUTO) performance hint will be set to PerformanceMode.THROUGHPUT. [Step 4/11] Reading model files @@ -1683,7 +1687,7 @@ models. [ INFO ] LOADED_FROM_CACHE: False [Step 9/11] Creating infer requests and preparing input tensors [ WARNING ] No input files were given for input 'x'!. 
This input will be filled with random values! - [ INFO ] Fill input 'x' with random values + [ INFO ] Fill input 'x' with random values [Step 10/11] Measuring performance (Start inference asynchronously, 12 inference requests, limits: 120000 ms duration) [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). [ INFO ] First inference took 1951.78 ms diff --git a/docs/notebooks/238-deep-floyd-if-convert-with-output.rst b/docs/notebooks/238-deep-floyd-if-convert-with-output.rst index cacd14131f82e3..0dbdecfb961789 100644 --- a/docs/notebooks/238-deep-floyd-if-convert-with-output.rst +++ b/docs/notebooks/238-deep-floyd-if-convert-with-output.rst @@ -843,8 +843,12 @@ release! .. code:: ipython3 - # Temporary requirement - %pip install -q matplotlib + import platform + + if platform.system() != "Windows": + %pip install -q "matplotlib>=3.4" + else: + %pip install -q "matplotlib>=3.4,<3.7" .. parsed-literal:: diff --git a/docs/notebooks/238-deep-floyd-if-optimize-with-output.rst b/docs/notebooks/238-deep-floyd-if-optimize-with-output.rst index 13845873dde097..eec8cffe1dea1e 100644 --- a/docs/notebooks/238-deep-floyd-if-optimize-with-output.rst +++ b/docs/notebooks/238-deep-floyd-if-optimize-with-output.rst @@ -700,7 +700,6 @@ increasing calibration dataset size. For U-Net models, you can collect a more diverse dataset by using a smaller ``selection_prob`` value, but this will increase the dataset collection time. - Compare file sizes ^^^^^^^^^^^^^^^^^^ diff --git a/docs/notebooks/239-image-bind-convert-with-output.rst b/docs/notebooks/239-image-bind-convert-with-output.rst index 045601f71e8a09..7825b73fa903a4 100644 --- a/docs/notebooks/239-image-bind-convert-with-output.rst +++ b/docs/notebooks/239-image-bind-convert-with-output.rst @@ -95,28 +95,34 @@ Prerequisites .. code:: ipython3 import sys - - %pip install -q soundfile pytorchvideo ftfy "timm>=0.6.7" einops fvcore "openvino>=2023.1.0" numpy scipy matplotlib --extra-index-url https://download.pytorch.org/whl/cpu - + import platform + + %pip install -q soundfile pytorchvideo ftfy "timm>=0.6.7" einops fvcore "openvino>=2023.1.0" numpy scipy --extra-index-url https://download.pytorch.org/whl/cpu + if sys.version_info.minor < 8: %pip install -q "decord" else: %pip install -q "eva-decord" - + if sys.platform != "linux": %pip install -q "torch>=2.0.1" "torchvision>=0.15.2,<0.17.0" "torchaudio>=2.0.2" else: %pip install -q "torch>=2.0.1" "torchvision>=0.15.2,<0.17.0" "torchaudio>=2.0.2" --index-url https://download.pytorch.org/whl/cpu + + if platform.system() != "Windows": + %pip install -q "matplotlib>=3.4" + else: + %pip install -q "matplotlib>=3.4,<3.7" .. code:: ipython3 from pathlib import Path - + repo_dir = Path("ImageBind") - + if not repo_dir.exists(): !git clone https://github.com/facebookresearch/ImageBind.git - + %cd {repo_dir} @@ -147,7 +153,7 @@ card `__. import torch from imagebind.models import imagebind_model from imagebind.models.imagebind_model import ModalityType - + # Instantiate model model = imagebind_model.imagebind_huge(pretrained=True) model.eval(); @@ -186,11 +192,11 @@ data reading and preprocessing for each modality. .. 
code:: ipython3 # Prepare inputs - + text_list = ["A car", "A bird", "A dog"] image_paths = [".assets/dog_image.jpg", ".assets/car_image.jpg", ".assets/bird_image.jpg"] audio_paths = [".assets/dog_audio.wav", ".assets/bird_audio.wav", ".assets/car_audio.wav"] - + inputs = { ModalityType.TEXT: data.load_and_transform_text(text_list, "cpu"), ModalityType.VISION: data.load_and_transform_vision_data(image_paths, "cpu"), @@ -223,14 +229,14 @@ embeddings. super().__init__() self.model = model self.modality = modality - + def forward(self, data): return self.model({self.modality: data}) .. code:: ipython3 import openvino as ov - + core = ov.Core() Select inference device @@ -243,14 +249,14 @@ select device from dropdown list for running inference using OpenVINO .. code:: ipython3 import ipywidgets as widgets - + device = widgets.Dropdown( options=core.available_devices + ["AUTO"], value='AUTO', description='Device:', disabled=False, ) - + device @@ -265,7 +271,7 @@ select device from dropdown list for running inference using OpenVINO .. code:: ipython3 ov_modality_models = {} - + modalities = [ModalityType.TEXT, ModalityType.VISION, ModalityType.AUDIO] for modality in modalities: export_dir = Path(f"image-bind-{modality}") @@ -347,17 +353,17 @@ they represent the same object. import matplotlib.pyplot as plt import numpy as np from scipy.special import softmax - - + + def visualize_prob_matrix(matrix, x_label, y_label): fig, ax = plt.subplots() ax.matshow(matrix, cmap='winter') - + for (i, j), z in np.ndenumerate(matrix): ax.text(j, i, '{:0.3f}'.format(z), ha='center', va='center') ax.set_xticks(range(len(x_label)), x_label) ax.set_yticks(range(len(y_label)), y_label) - + image_list = [img.split('/')[-1] for img in image_paths] audio_list = [audio.split('/')[-1] for audio in audio_paths] @@ -369,7 +375,7 @@ Text-Image classification .. code:: ipython3 text_vision_scores = softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T, axis=-1) - + visualize_prob_matrix(text_vision_scores, text_list, image_list) @@ -385,7 +391,7 @@ Text-Audio classification .. code:: ipython3 text_audio_scores = softmax(embeddings[ModalityType.AUDIO] @ embeddings[ModalityType.TEXT].T, axis=-1) - + visualize_prob_matrix(text_audio_scores, text_list, audio_list) @@ -401,7 +407,7 @@ Image-Audio classification .. code:: ipython3 audio_vision_scores = softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.AUDIO].T, axis=-1) - + visualize_prob_matrix(audio_vision_scores, image_list, audio_list) @@ -424,7 +430,7 @@ Putting all together, we can match text, image, and sound for our data. .. parsed-literal:: - Predicted label: A car + Predicted label: A car probability for image - 1.000 probability for audio - 1.000 @@ -437,7 +443,7 @@ Putting all together, we can match text, image, and sound for our data. .. raw:: html - +
" - + description = """ Bark is a universal text-to-audio model created by [Suno](http://suno.ai). \ Bark can generate highly realistic, multilingual speech as well as other audio - including music, background noise and simple sound effects. \ The model output is not censored and the authors do not endorse the opinions in the generated content. \ Use at your own risk. """ - + article = """ - + ## 🌎 Foreign Language - + Bark supports various languages out-of-the-box and automatically determines language from input text. \ When prompted with code-switched text, Bark will even attempt to employ the native accent for the respective languages in the same voice. - + Try the prompt: - + ``` Buenos días Miguel. Tu colega piensa que tu alemán es extremadamente malo. But I suppose your english isn't terrible. ``` - + ## 🤭 Non-Speech Sounds - + Below is a list of some known non-speech sounds, but we are finding more every day. \ Please let us know if you find patterns that work particularly well on Discord! - + * [laughter] * [laughs] * [sighs] @@ -1027,43 +1132,43 @@ Interactive demo * ♪ for song lyrics * capitalization for emphasis of a word * MAN/WOMAN: for bias towards speaker - + Try the prompt: - + ``` " [clears throat] Hello, my name is Suno. And, uh — and I like pizza. [laughs] But I also have other interests such as... ♪ singing ♪." ``` - + ## 🎶 Music Bark can generate all types of audio, and, in principle, doesn't see a difference between speech and music. \ Sometimes Bark chooses to generate text as music, but you can help it out by adding music notes around your lyrics. - + Try the prompt: - + ``` ♪ In the jungle, the mighty jungle, the lion barks tonight ♪ ``` - + ## 🧬 Voice Cloning - + Bark has the capability to fully clone voices - including tone, pitch, emotion and prosody. \ The model also attempts to preserve music, ambient noise, etc. from input audio. \ However, to mitigate misuse of this technology, we limit the audio history prompts to a limited set of Suno-provided, fully synthetic options to choose from. - + ## 👥 Speaker Prompts - + You can provide certain speaker prompts such as NARRATOR, MAN, WOMAN, etc. \ Please note that these are not always respected, especially if a conflicting audio history prompt is given. - + Try the prompt: - + ``` WOMAN: I would like an oatmilk latte please. MAN: Wow, that's expensive! ``` - + """ - + examples = [ [ "Please surprise me and speak in whatever voice you enjoy. Vielen Dank und Gesundheit!", @@ -1078,15 +1183,15 @@ Interactive demo "Speaker 0 (es)", ], ] - - + + def gen_tts(text, history_prompt): history_prompt = PROMPT_LOOKUP[history_prompt] audio_arr = generate_audio(text, history_prompt=history_prompt) audio_arr = (audio_arr * 32767).astype(np.int16) return (SAMPLE_RATE, audio_arr) - - + + with gr.Blocks() as block: gr.Markdown(title) gr.Markdown(description) @@ -1105,23 +1210,9 @@ Interactive demo gr.Markdown(article) run_button.click(fn=gen_tts, inputs=inputs, outputs=outputs, queue=True) try: - block.queue().launch(debug=False) + block.launch(debug=False) except Exception: - block.queue().launch(share=True, debug=False) + block.launch(share=True, debug=False) # if you are launching remotely, specify server_name and server_port # demo.launch(server_name='your server name', server_port='server port in int') # Read more in the docs: https://gradio.app/docs/ - - -.. parsed-literal:: - - Running on local URL: http://127.0.0.1:7860 - - To create a public link, set `share=True` in `launch()`. - - - -.. .. raw:: html - -..
- diff --git a/docs/notebooks/257-llava-multimodal-chatbot-with-output.rst b/docs/notebooks/257-llava-multimodal-chatbot-with-output.rst index 3230c0f350aec2..e5089d48303d36 100644 --- a/docs/notebooks/257-llava-multimodal-chatbot-with-output.rst +++ b/docs/notebooks/257-llava-multimodal-chatbot-with-output.rst @@ -55,9 +55,12 @@ Table of contents: conversion <#prepare-helpers-for-model-conversion>`__ - `Convert and Optimize Model <#convert-and-optimize-model>`__ - - `Instantiate PyTorch model <#instantiate-pytorch-model>`__ - - `Compress Model weights to 4 and 8 bits using NNCF <#compress-model-weights-to-4-and-8-bits-using-nncf>`__ - - `Convert model to OpenVINO IR format <#convert-model-to-openvino-ir-format>`__ + - `Instantiate PyTorch model + <#instantiate-pytorch-model-uparrow#table-of-content>`__ + - `Compress Model weights to 4 and 8 bits using NNCF + <#compress-model-weights-to-4-and-8-bits-using-nncf-uparrow#table-of-content>`__ + - `Convert model to OpenVINO IR format + <#convert-model-to-openvino-ir-format-uparrow#table-of-content>`__ - `Prepare OpenVINO based inference pipeline <#prepare-openvino-based-inference-pipeline>`__ @@ -112,7 +115,7 @@ Install required dependencies .. code:: ipython3 import sys - + %pip install -q "torch>=2.1.0" "torchvision" "torchaudio" --index-url https://download.pytorch.org/whl/cpu %pip install -q "openvino>=2023.2.0" "nncf>=2.7.0" "sentencepiece" "tokenizers>=0.12.1" "transformers>=4.37.2" "gradio" "einops" @@ -120,7 +123,7 @@ Install required dependencies .. parsed-literal:: Note: you may need to restart the kernel to use updated packages. - + [notice] A new release of pip is available: 23.3.2 -> 24.0 [notice] To update, run: pip install --upgrade pip Note: you may need to restart the kernel to use updated packages. @@ -129,12 +132,12 @@ Install required dependencies .. code:: ipython3 from pathlib import Path - + repo_dir = Path("LLaVA") - + if not repo_dir.exists(): !git clone https://github.com/haotian-liu/LLaVA.git - + sys.path.insert(0, str(repo_dir.resolve())) Build model tokenizer and image processor @@ -157,9 +160,9 @@ instruction. from transformers import AutoTokenizer, AutoConfig, CLIPImageProcessor from llava.model.language_model.llava_mpt import LlavaMptForCausalLM - + model_id = "liuhaotian/LLaVA-Lightning-MPT-7B-preview" - + config = AutoConfig.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) image_processor = CLIPImageProcessor.from_pretrained(config.mm_vision_tower) @@ -178,7 +181,7 @@ instruction. DEFAULT_IM_END_TOKEN, DEFAULT_IMAGE_TOKEN ) - + mm_use_im_start_end = getattr(config, "mm_use_im_start_end", False) mm_use_im_patch_token = getattr(config, "mm_use_im_patch_token", True) if mm_use_im_patch_token: @@ -187,7 +190,7 @@ instruction. tokenizer.add_tokens( [DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True ) - + if hasattr(config, "max_sequence_length"): context_len = config.max_sequence_length else: @@ -244,7 +247,7 @@ The code below prepares function for converting LLaVA model to OpenVINO Intermediate Representation format. It splits model on parts described above, prepare example inputs for each part and convert each part using `OpenVINO Model Conversion -API `__. +API `__. ``ov.convert_model`` function accepts PyTorch model instance and returns ``ov.Model`` object that represent model in OpenVINO format. It is ready to use for loading on device using ``ov.compile_model`` or can be saved @@ -260,19 +263,19 @@ on disk using ``ov.save_model``. 
import nncf from typing import Optional, Tuple, List import torch.nn.functional as F - + warnings.filterwarnings('ignore') - - + + class ModelWrapper(torch.nn.Module): """ Model wrapper class for export for spliting original forward logic on preparing multimodal data and inference using it. - That allows us to sperate image encoder and token embeddings model from general flow. + That allows us to sperate image encoder and token embeddings model from general flow. """ def __init__(self, model): super().__init__() self.model = model - + def forward( self, input_ids: torch.LongTensor = None, @@ -294,19 +297,19 @@ on disk using ``ov.save_model``. outputs.last_hidden_state.to(self.model.transformer.wte.weight.device), self.model.transformer.wte.weight.to(outputs.last_hidden_state.dtype), ) - + return (logits, tuple(outputs.past_key_values)) - - + + def patch_model_forward(model): """ - Helper function for patching model forward for model with past. - It makes model more convinient for export to TorchScript format avoiding limitation + Helper function for patching model forward for model with past. + It makes model more convinient for export to TorchScript format avoiding limitation that list of tensors can not be correctly traced as model input """ - + orig_forward = model.forward - + @wraps(orig_forward) def ts_patched_forward( input_ids: torch.Tensor, @@ -316,11 +319,11 @@ on disk using ``ov.save_model``. pkv_list = list(past_key_values) outs = orig_forward(input_ids=input_ids, past_key_values=pkv_list, attention_mask=attention_mask,) return outs - + model.forward = ts_patched_forward return model - - + + def flattenize_inputs(inputs): """ Helper function for making nested inputs flattens @@ -334,8 +337,8 @@ on disk using ``ov.save_model``. else: flatten_inputs.append(input_data) return flatten_inputs - - + + def cleanup_torchscript_cache(): """ Helper for removing cached model representation @@ -343,14 +346,14 @@ on disk using ``ov.save_model``. torch._C._jit_clear_class_registry() torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore() torch.jit._state._clear_class_state() - + def postprocess_converted_model(ov_model, example_input=None, input_names=None, output_names=None, dynamic_shapes=None): """ Helper function for appling postprocessing on converted model with updating input names, shapes and output names acording to requested specification """ flatten_example_inputs = flattenize_inputs(example_input) if example_input else [] - + if input_names: for inp_name, m_input, input_data in zip(input_names, ov_model.inputs, flatten_example_inputs): input_node = m_input.get_node() @@ -362,20 +365,20 @@ on disk using ``ov.save_model``. shape[k] = -1 input_node.set_partial_shape(ov.PartialShape(shape)) m_input.get_tensor().set_names({inp_name}) - + if output_names: for out, out_name in zip(ov_model.outputs, output_names): out.get_tensor().set_names({out_name}) ov_model.validate_nodes_and_infer_types() return ov_model - - + + def convert_llava_mpt(pt_model: torch.nn.Module, model_path: Path, image_encoder_wc_parameters: Optional[dict] = None, llava_wc_parameters: Optional[dict] = None): """ LLaVA MPT model conversion function - + Params: pt_model: PyTorch model model_path: path for saving model @@ -403,7 +406,7 @@ on disk using ``ov.save_model``. 
del ov_model gc.collect() print("Image Encoder model successfully converted") - + if not token_embedding_model_path.exists(): model.forward = model.get_model().embed_tokens ov_model = ov.convert_model( @@ -414,7 +417,7 @@ on disk using ``ov.save_model``. del ov_model gc.collect() print("Token Embedding model successfully converted") - + if first_stage_model_path.exists() and second_stage_model_path.exists(): print("LLaVA model successfully converted") del pt_model @@ -433,7 +436,7 @@ on disk using ``ov.save_model``. dynamic_shapes[inputs[-1]] = {2: "past_sequence + sequence"} dynamic_shapes[inputs[-2]] = {2: "past_sequence + sequence"} outputs.extend([f"present.{idx}.key", f"present.{idx}.value"]) - + inputs.extend(["attention_mask"]) if not first_stage_model_path.exists(): ov_model = ov.convert_model( @@ -447,8 +450,8 @@ on disk using ``ov.save_model``. cleanup_torchscript_cache() del ov_model gc.collect() - - + + if not second_stage_model_path.exists(): model_wrap = patch_model_forward(model_wrap) example_input_second_stage = { @@ -458,10 +461,10 @@ on disk using ``ov.save_model``. } ov_model = ov.convert_model(model_wrap, example_input=example_input_second_stage) ov_model = postprocess_converted_model( - ov_model, - example_input=example_input_second_stage.values(), - input_names=inputs, - output_names=outputs, + ov_model, + example_input=example_input_second_stage.values(), + input_names=inputs, + output_names=outputs, dynamic_shapes=dynamic_shapes ) if llava_wc_parameters is not None: @@ -492,7 +495,7 @@ Convert model to OpenVINO format and save it on disk. Let’s consider each step more deeply. -Instantiate PyTorch model +Instantiate PyTorch model `:math:`\Uparrow` <#table-of-content>`__ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -503,7 +506,7 @@ from `HuggingFace hub `__ during first run. It may takes some time and requires at least 13 Gb free space on disk. -Compress Model weights to 4 and 8 bits using NNCF +Compress Model weights to 4 and 8 bits using NNCF `:math:`\Uparrow` <#table-of-content>`__ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -544,9 +547,9 @@ prediction quality. More details about weights compression, can be found in `OpenVINO documentation `__. - **NOTE**: There is no speedup for INT4 compressed models on dGPU. + **Note**: There is no speedup for INT4 compressed models on dGPU. -Convert model to OpenVINO IR format +Convert model to OpenVINO IR format `:math:`\Uparrow` <#table-of-content>`__ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -560,14 +563,14 @@ compression instead of INT8 weight compression. .. code:: ipython3 import ipywidgets as widgets - + compression_mode = widgets.Dropdown( options=['INT4', 'INT8'], value='INT4', description='Compression mode:', disabled=False, ) - + compression_mode @@ -587,7 +590,7 @@ compression instead of INT8 weight compression. else: compressed_model_dir = Path("llava-mpt/INT8_compressed_weights") llava_wc_parameters = dict(mode=nncf.CompressWeightsMode.INT8) - + if not compressed_model_dir.exists(): compressed_model_dir.mkdir(exist_ok=True, parents=True) config.save_pretrained(compressed_model_dir) @@ -595,10 +598,10 @@ compression instead of INT8 weight compression. 
vision_tower = model.get_vision_tower() if not vision_tower.is_loaded: vision_tower.load_model() - + if mm_use_im_start_end: model.resize_token_embeddings(len(tokenizer)) - + model.eval() with torch.no_grad(): convert_llava_mpt(model, compressed_model_dir, @@ -633,7 +636,9 @@ compression instead of INT8 weight compression. +.. raw:: html +

 
 
 
@@ -659,7 +664,9 @@ compression instead of INT8 weight compression.
 
 
 
+.. raw:: html
 
+    

 
 
 
@@ -691,7 +698,9 @@ compression instead of INT8 weight compression.
 
 
 
+.. raw:: html
 
+    

 
 
 
@@ -715,7 +724,9 @@ compression instead of INT8 weight compression.
 
 
 
+.. raw:: html
 
+    

 
 
 
@@ -747,7 +758,9 @@ compression instead of INT8 weight compression.
 
 
 
+.. raw:: html
 
+    

 
 
 
@@ -784,8 +797,8 @@ documentation  Tuple[Tuple[torch.Tensor]]:
@@ -1030,7 +1043,7 @@ documentation `__
    -  `Convert and Optimize Model <#convert-and-optimize-model>`__
 
-      -  `Instantiate PyTorch model <#instantiate-pytorch-model>`__
-      -  `Compress Model weights to 4 and 8 bits using NNCF <#compress-model-weights-to-4-and-8-bits-using-nncf>`__
-      -  `Convert model to OpenVINO IR format <#convert-model-to-openvino-ir-format>`__
+      -  `Instantiate PyTorch model
+          <#instantiate-pytorch-model-uparrow#table-of-content>`__
+      -  `Compress Model weights to 4 and 8 bits using NNCF
+          <#compress-model-weights-to-4-and-8-bits-using-nncf-uparrow#table-of-content>`__
+      -  `Convert model to OpenVINO IR format
+          <#convert-model-to-openvino-ir-format-uparrow#table-of-content>`__
 
 -  `Prepare OpenVINO based inference
    pipeline <#prepare-openvino-based-inference-pipeline>`__
@@ -80,19 +83,19 @@ Install required dependencies
 
 .. code:: ipython3
 
-    %pip install -q torch torchvision --extra-index-url https://download.pytorch.org/whl/cpu
-    %pip install -q "transformers>=4.31.0,<4.35.0" einops peft opencv_python decord pytorchvideo sentencepiece protobuf "openvino>=2023.2.0" "nncf>=2.7.0" gradio
+    %pip install -q torch "torchvision<0.17.0" "transformers>=4.31.0,<4.35.0" "pytorchvideo" "einops" "peft==0.6.2" --extra-index-url https://download.pytorch.org/whl/cpu
+    %pip install -q opencv_python decord sentencepiece protobuf "openvino>=2023.2.0" "nncf>=2.7.0" gradio
 
 .. code:: ipython3
 
     from pathlib import Path
     import sys
-
+    
     repo_dir = Path("Video-LLaVA")
-
+    
     if not repo_dir.exists():
         !git clone https://github.com/PKU-YuanGroup/Video-LLaVA.git
-
+    
     sys.path.insert(0, str(repo_dir.resolve()))
 
 .. container:: alert alert-block alert-warning
@@ -103,21 +106,24 @@ Install required dependencies
 .. code:: ipython3
 
     import gc
-
+    
     import transformers
-    from llava.model import LlavaLlamaForCausalLM
-    from llava.constants import (
-        DEFAULT_X_PATCH_TOKEN,
-        DEFAULT_X_START_TOKEN,
-        DEFAULT_X_END_TOKEN,
-        DEFAULT_X_TOKEN
+    from videollava.model import LlavaLlamaForCausalLM
+    from videollava.constants import (
+        DEFAULT_IMAGE_PATCH_TOKEN,
+        DEFAULT_VIDEO_PATCH_TOKEN,
+        DEFAULT_IM_START_TOKEN,
+        DEFAULT_VID_START_TOKEN,
+        DEFAULT_IM_END_TOKEN,
+        DEFAULT_VID_END_TOKEN,
+        DEFAULT_IMAGE_TOKEN,
     )
     transformers.logging.set_verbosity_error()
-
+    
     model_id = "LanguageBind/Video-LLaVA-7B"
-
+    
     config = transformers.AutoConfig.from_pretrained(model_id)
-
+    
     tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
     model = LlavaLlamaForCausalLM.from_pretrained(model_id)
     image_tower = model.get_image_tower()
@@ -126,16 +132,16 @@ Install required dependencies
     video_tower.load_model()
     image_processor = image_tower.image_processor
     video_processor = video_tower.video_processor
-    mm_use_x_start_end = getattr(config, "mm_use_x_start_end", False)
-    mm_use_x_patch_token = getattr(config, "mm_use_x_patch_token", True)
-    if mm_use_x_patch_token:
-        for x in config.X:
-            tokenizer.add_tokens([DEFAULT_X_PATCH_TOKEN[x.upper()]], special_tokens=True)
-    if mm_use_x_start_end:
-        for x in config.X:
-            tokenizer.add_tokens([DEFAULT_X_START_TOKEN[x.upper()], DEFAULT_X_END_TOKEN[x.upper()]], special_tokens=True)
+    mm_use_im_start_end = getattr(config, "mm_use_im_start_end", False)
+    mm_use_im_patch_token = getattr(config, "mm_use_im_patch_token", True)
+    if mm_use_im_patch_token:
+        tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
+        tokenizer.add_tokens([DEFAULT_VIDEO_PATCH_TOKEN], special_tokens=True)
+    if mm_use_im_start_end:
+        tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
+        tokenizer.add_tokens([DEFAULT_VID_START_TOKEN, DEFAULT_VID_END_TOKEN], special_tokens=True)
     preprocess_fn = model.prepare_inputs_labels_for_multimodal
-
+    
     del model
     gc.collect()
 
@@ -228,7 +234,7 @@ The code below prepares function for converting Video-LLaVA model to
 OpenVINO Intermediate Representation format. It splits the model into the parts
 described above, prepares example inputs for each part, and converts each
 part using `OpenVINO Model Conversion
-API `__.
+API `__.
 The ``ov.convert_model`` function accepts a PyTorch model instance and returns an
 ``ov.Model`` object that represents the model in OpenVINO format (see also the short
 standalone sketch below). It is ready for loading on a device using ``ov.compile_model`` or can be saved
@@ -240,13 +246,13 @@ on disk using ``ov.save_model``.
     import openvino as ov
     import nncf
     from typing import Optional, Tuple, List
-
-
+    
+    
     class ModelWrapper(torch.nn.Module):
         def __init__(self, model):
             super().__init__()
             self.model = model
-
+    
         def forward(
             self,
             input_ids: torch.LongTensor = None,
@@ -265,13 +271,13 @@ on disk using ``ov.save_model``.
                 output_hidden_states=False,
                 return_dict=True,
             )
-
+    
             hidden_states = outputs[0]
             logits = self.model.lm_head(hidden_states)
-
+    
             return (logits, outputs.past_key_values)
-
-
+    
+    
     def set_node_names(ov_model, input_names=None, output_names=None):
         if input_names is not None:
             for inp, name in zip(ov_model.inputs, input_names):
@@ -279,10 +285,10 @@ on disk using ``ov.save_model``.
         if output_names is not None:
             for out, name in zip(ov_model.outputs, output_names):
                 out.get_tensor().set_names({name})
-
+    
         ov_model.validate_nodes_and_infer_types()
-
-
+    
+    
     def cleanup_torchscript_cache():
         """
         Helper for removing cached model representation
@@ -290,7 +296,7 @@ on disk using ``ov.save_model``.
         torch._C._jit_clear_class_registry()
         torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore()
         torch.jit._state._clear_class_state()
-
+    
     def convert_videollava(
         pt_model: torch.nn.Module,
         model_path: Path,
@@ -298,7 +304,7 @@ on disk using ``ov.save_model``.
     ):
         """
         Video-LLaVA model conversion function
-
+    
         Params:
           pt_model: PyTorch model
           model_path: path for saving model
@@ -312,7 +318,7 @@ on disk using ``ov.save_model``.
         wrapped = ModelWrapper(pt_model)
         first_stage_model_path = ov_out_path / "videollava_input_embed.xml"
         second_stage_model_path = ov_out_path / "videollava_with_past.xml"
-
+    
         if first_stage_model_path.exists() and second_stage_model_path.exists():
             print("Video-LLaVA model successfully converted")
             del pt_model
@@ -327,7 +333,7 @@ on disk using ``ov.save_model``.
         for idx in range(len(outs[1])):
             input_names.extend([f"past_key_values.{idx}.key", f"past_key_values.{idx}.value"])
             output_names.extend([f"present.{idx}.key", f"present.{idx}.value"])
-
+    
         if not first_stage_model_path.exists():
             ov_model = ov.convert_model(
                 wrapped, example_input=example_input_first_stage
@@ -340,8 +346,8 @@ on disk using ``ov.save_model``.
             cleanup_torchscript_cache()
             del ov_model
             gc.collect()
-
-
+                
+    
         if not second_stage_model_path.exists():
             example_input_second_stage = {
                 "input_ids": torch.ones((1, 1), dtype=torch.long),
@@ -350,7 +356,7 @@ on disk using ``ov.save_model``.
             }
             ov_model = ov.convert_model(wrapped, example_input=example_input_second_stage)
             set_node_names(ov_model, input_names, output_names)
-
+    
             if videollava_wc_parameters is not None:
                 print("Applying weight compression to second stage Video-LLaVA model")
                 ov_model = nncf.compress_weights(ov_model, **videollava_wc_parameters)
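
The conversion function above boils down to a handful of OpenVINO calls. As a
reminder of the bare convert/save/compile flow, here is a minimal,
self-contained sketch; the torchvision model and file name are placeholders
for illustration only, not part of this notebook:

.. code:: ipython3

    import torch
    import torchvision
    import openvino as ov

    # Any traceable PyTorch module works; resnet18 only stands in for the real model.
    pt_model = torchvision.models.resnet18(weights=None).eval()
    example_input = torch.zeros((1, 3, 224, 224))

    # ov.convert_model returns an ov.Model object in OpenVINO format.
    ov_model = ov.convert_model(pt_model, example_input=example_input)

    # The converted model can be saved to IR on disk ...
    ov.save_model(ov_model, "resnet18.xml")

    # ... or compiled for a device and used for inference right away.
    compiled_model = ov.Core().compile_model(ov_model, "CPU")
    result = compiled_model(example_input.numpy())
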
@@ -379,7 +385,7 @@ Convert model to OpenVINO format and save it on disk.
 
 Let’s consider each step more deeply.
 
-Instantiate PyTorch model
+Instantiate PyTorch model `:math:`\Uparrow` <#table-of-content>`__
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 
@@ -390,7 +396,7 @@ from `HuggingFace hub `__ during first
 run. It may take some time and requires at least 13 GB of free space on
 disk.
 
-Compress Model weights to 4 and 8 bits using NNCF
+Compress Model weights to 4 and 8 bits using NNCF `:math:`\Uparrow` <#table-of-content>`__
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 
@@ -431,9 +437,9 @@ prediction quality.
 More details about weight compression can be found in `OpenVINO
 documentation `__.
 
-   **NOTE**: There is no speedup for INT4 compressed models on dGPU.
+   **Note**: There is no speedup for INT4 compressed models on dGPU.
 
-Convert model to OpenVINO IR format
+Convert model to OpenVINO IR format `:math:`\Uparrow` <#table-of-content>`__
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 
@@ -447,14 +453,14 @@ compression instead of INT8 weight compression.
 .. code:: ipython3
 
     import ipywidgets as widgets
-
+    
     compression_mode = widgets.Dropdown(
         options=["INT4", "INT8"],
         value="INT4",
         description="Compression mode:",
         disabled=False,
     )
-
+    
     compression_mode
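
For reference, here is a minimal sketch of what the two choices above map to
when calling ``nncf.compress_weights``. The tiny placeholder model and the
INT4 ``group_size``/``ratio`` values are illustrative assumptions, not the
settings used later in this notebook:

.. code:: ipython3

    import torch
    import openvino as ov
    import nncf

    # Two tiny toy models stand in for the multi-gigabyte language model here,
    # one per compression mode, so the two results stay independent.
    toy_int8 = ov.convert_model(torch.nn.Linear(128, 128), example_input=torch.zeros(1, 128))
    toy_int4 = ov.convert_model(torch.nn.Linear(128, 128), example_input=torch.zeros(1, 128))

    # INT8 weight compression needs no extra parameters.
    int8_model = nncf.compress_weights(toy_int8, mode=nncf.CompressWeightsMode.INT8)

    # INT4 compression is group-wise; group_size and ratio control the
    # granularity and the share of layers compressed to 4 bits.
    int4_model = nncf.compress_weights(
        toy_int4,
        mode=nncf.CompressWeightsMode.INT4_SYM,
        group_size=64,
        ratio=0.8,
    )
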
 
 
@@ -474,12 +480,12 @@ compression instead of INT8 weight compression.
     else:
         compressed_model_dir = Path("videollava/INT8_compressed_weights")
         videollava_wc_parameters = dict(mode=nncf.CompressWeightsMode.INT8)
-
+    
     if not compressed_model_dir.exists():
         compressed_model_dir.mkdir(exist_ok=True, parents=True)
         model = LlavaLlamaForCausalLM.from_pretrained(model_id)
         model.resize_token_embeddings(len(tokenizer))
-
+        
         if hasattr(config, "max_sequence_length"):
             context_len = config.max_sequence_length
         else:
@@ -490,7 +496,7 @@ compression instead of INT8 weight compression.
         video_tower = model.get_video_tower()
         if not video_tower.is_loaded:
             video_tower.load_model()
-
+            
         model.eval()
         with torch.no_grad():
             convert_videollava(
@@ -521,7 +527,9 @@ compression instead of INT8 weight compression.
 
 
 
+.. raw:: html
 
+    

 
 
 
@@ -552,7 +560,9 @@ compression instead of INT8 weight compression.
 
 
 
+.. raw:: html
 
+    

 
 
 
@@ -576,7 +586,9 @@ compression instead of INT8 weight compression.
 
 
 
+.. raw:: html
 
+    

 
 
 
@@ -607,7 +619,9 @@ compression instead of INT8 weight compression.
 
 
 
+.. raw:: html
 
+    

 
 
 
@@ -643,8 +657,8 @@ documentation  Tuple[Tuple[torch.Tensor]]:
@@ -784,7 +798,7 @@ documentation 
-    %pip install -q "openvino>=2023.1.0" matplotlib Pillow gradio
-    %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu torch transformers accelerate controlnet_aux "diffusers>=0.23.0"
+    import platform
+
+    %pip install -q "openvino>=2023.1.0" Pillow gradio
+    %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu torch transformers accelerate controlnet_aux "diffusers>=0.23.0" "peft==0.6.2"
+
+    if platform.system() != "Windows":
+        %pip install -q "matplotlib>=3.4"
+    else:
+        %pip install -q "matplotlib>=3.4,<3.7"
 
 
 .. parsed-literal::
@@ -74,7 +81,6 @@ Prerequisites
     import diffusers
     import torch
     import matplotlib.pyplot as plt
-    import ipywidgets
     import PIL
     import numpy as np
     import gradio as gr
@@ -853,9 +859,11 @@ select device from dropdown list for running inference using OpenVINO
 
 .. code:: ipython3
 
+    import ipywidgets as widgets
+
     core = ov.Core()
 
-    device = ipywidgets.Dropdown(
+    device = widgets.Dropdown(
         options=core.available_devices + ["AUTO"],
         value="AUTO",
         description="Device:",
diff --git a/docs/notebooks/259-decidiffusion-image-generation-with-output.rst b/docs/notebooks/259-decidiffusion-image-generation-with-output.rst
index 51aa6b6f013624..2b31ac01c580a8 100644
--- a/docs/notebooks/259-decidiffusion-image-generation-with-output.rst
+++ b/docs/notebooks/259-decidiffusion-image-generation-with-output.rst
@@ -89,7 +89,7 @@ install required packages
 
 .. code:: ipython3
 
-    %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu  "diffusers" "transformers" "torch" "pillow" "openvino>=2023.1.0" "gradio" "datasets" "nncf"
+    %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu  "diffusers" "transformers" "torch" "pillow" "openvino>=2023.1.0" "gradio" "datasets" "nncf>=2.7.0" "peft==0.6.2"
 
 Prepare DeciDiffusion models for OpenVINO format conversion
 -----------------------------------------------------------
@@ -171,9 +171,9 @@ to create diffusers pipeline for DeciDiffusion.
     import openvino as ov
     from diffusers import StableDiffusionPipeline
     import warnings
-
+    
     warnings.filterwarnings('ignore')
-
+    
     TEXT_ENCODER_OV_PATH = Path("model/text_encoder.xml")
     UNET_OV_PATH = Path('model/unet_nas.xml')
     VAE_ENCODER_OV_PATH = Path("model/vae_encoder.xml")
@@ -181,7 +181,7 @@ to create diffusers pipeline for DeciDiffusion.
     checkpoint = "Deci/DeciDiffusion-v1-0"
     scheduler_config_dir = Path("model/scheduler")
     tokenizer_dir = Path("model/tokenizer")
-
+    
     def load_orginal_pytorch_pipeline_componets():
         pipeline = StableDiffusionPipeline.from_pretrained(checkpoint, custom_pipeline=checkpoint, torch_dtype=torch.float32)
         pipeline.unet = pipeline.unet.from_pretrained(checkpoint, subfolder='flexible_unet', torch_dtype=torch.float32)
@@ -191,12 +191,12 @@ to create diffusers pipeline for DeciDiffusion.
         unet.eval()
         vae = pipeline.vae
         vae.eval()
-
+    
         del pipeline
         gc.collect();
         return text_encoder, unet, vae
-
-
+        
+    
     def cleanup_torchscript_cache():
         """
         Helper for removing cached model representation
@@ -204,10 +204,10 @@ to create diffusers pipeline for DeciDiffusion.
         torch._C._jit_clear_class_registry()
         torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore()
         torch.jit._state._clear_class_state()
-
-
+    
+    
     skip_conversion = TEXT_ENCODER_OV_PATH.exists() and UNET_OV_PATH.exists() and VAE_ENCODER_OV_PATH.exists() and VAE_DECODER_OV_PATH.exists()
-
+    
     if not skip_conversion:
         text_encoder, unet, vae = load_orginal_pytorch_pipeline_componets()
     else:
@@ -256,9 +256,9 @@ hidden states.
 
     def convert_encoder(text_encoder: torch.nn.Module, ir_path:Path):
         """
-        Convert Text Encoder mode.
-        Function accepts text encoder model, and prepares example inputs for conversion,
-        Parameters:
+        Convert Text Encoder model. 
+        Function accepts text encoder model, and prepares example inputs for conversion, 
+        Parameters: 
             text_encoder (torch.nn.Module): text_encoder model from Stable Diffusion pipeline
             ir_path (Path): File for storing model
         Returns:
@@ -267,7 +267,7 @@ hidden states.
         input_ids = torch.ones((1, 77), dtype=torch.long)
         # switch model to inference mode
         text_encoder.eval()
-
+    
         # disable gradients calculation for reducing memory consumption
         with torch.no_grad():
             # Export model to IR format
@@ -277,13 +277,13 @@ hidden states.
         cleanup_torchscript_cache()
         gc.collect();
         print(f'Text Encoder successfully converted to IR and saved to {ir_path}')
-
-
+        
+    
     if not TEXT_ENCODER_OV_PATH.exists():
         convert_encoder(text_encoder, TEXT_ENCODER_OV_PATH)
     else:
         print(f"Text encoder will be loaded from {TEXT_ENCODER_OV_PATH}")
-
+    
     del text_encoder
     gc.collect();
 
@@ -311,18 +311,18 @@ Model predicts the ``sample`` state for the next step.
 .. code:: ipython3
 
     import numpy as np
-
+    
     dtype_mapping = {
         torch.float32: ov.Type.f32,
         torch.float64: ov.Type.f64
     }
-
-
+    
+    
     def convert_unet(unet:torch.nn.Module, ir_path:Path):
         """
-        Convert U-net model to IR format.
-        Function accepts unet model, prepares example inputs for conversion,
-        Parameters:
+        Convert U-net model to IR format. 
+        Function accepts unet model, prepares example inputs for conversion, 
+        Parameters: 
             unet (StableDiffusionPipeline): unet from Stable Diffusion pipeline
             ir_path (Path): File for storing model
         Returns:
@@ -341,7 +341,7 @@ Model predicts the ``sample`` state for the next step.
                 shape[0] = -1
             element_type = dtype_mapping[input_tensor.dtype]
             input_info.append((shape, element_type))
-
+    
         unet.eval()
         with torch.no_grad():
             ov_model = ov.convert_model(unet, example_input=dummy_inputs, input=input_info)
@@ -350,8 +350,8 @@ Model predicts the ``sample`` state for the next step.
         cleanup_torchscript_cache()
         gc.collect();
         print(f'U-Net NAS successfully converted to IR and saved to {ir_path}')
-
-
+    
+    
     if not UNET_OV_PATH.exists():
         convert_unet(unet, UNET_OV_PATH)
     else:
@@ -391,11 +391,11 @@ of the pipeline, it will be better to convert them to separate models.
 
     def convert_vae_encoder(vae: torch.nn.Module, ir_path: Path):
         """
-        Convert VAE model for encoding to IR format.
-        Function accepts vae model, creates wrapper class for export only necessary for inference part,
-        prepares example inputs for conversion,
-        Parameters:
-            vae (torch.nn.Module): VAE model from StableDiffusio pipeline
+        Convert VAE model for encoding to IR format. 
+        Function accepts vae model, creates wrapper class for export only necessary for inference part, 
+        prepares example inputs for conversion, 
+        Parameters: 
+            vae (torch.nn.Module): VAE model from StableDiffusion pipeline 
             ir_path (Path): File for storing model
         Returns:
             None
@@ -404,7 +404,7 @@ of the pipeline, it will be better to convert them to separate models.
             def __init__(self, vae):
                 super().__init__()
                 self.vae = vae
-
+    
             def forward(self, image):
                 return self.vae.encode(x=image)["latent_dist"].sample()
         vae_encoder = VAEEncoderWrapper(vae)
@@ -417,20 +417,20 @@ of the pipeline, it will be better to convert them to separate models.
         cleanup_torchscript_cache()
         gc.collect();
         print(f'VAE encoder successfully converted to IR and saved to {ir_path}')
-
-
+    
+    
     if not VAE_ENCODER_OV_PATH.exists():
         convert_vae_encoder(vae, VAE_ENCODER_OV_PATH)
     else:
         print(f"VAE encoder will be loaded from {VAE_ENCODER_OV_PATH}")
-
-
+    
+    
     def convert_vae_decoder(vae: torch.nn.Module, ir_path: Path):
         """
-        Convert VAE model for decoding to IR format.
-        Function accepts vae model, creates wrapper class for export only necessary for inference part,
-        prepares example inputs for conversion,
-        Parameters:
+        Convert VAE model for decoding to IR format. 
+        Function accepts vae model, creates wrapper class for export only necessary for inference part, 
+        prepares example inputs for conversion, 
+        Parameters: 
             vae (torch.nn.Module): VAE model from StableDiffusion pipeline
             ir_path (Path): File for storing model
         Returns:
@@ -440,13 +440,13 @@ of the pipeline, it will be better to convert them to separate models.
             def __init__(self, vae):
                 super().__init__()
                 self.vae = vae
-
+    
             def forward(self, latents):
                 return self.vae.decode(latents)
-
+        
         vae_decoder = VAEDecoderWrapper(vae)
         latents = torch.zeros((1, 4, 64, 64))
-
+    
         vae_decoder.eval()
         with torch.no_grad():
             ov_model = ov.convert_model(vae_decoder, example_input=latents, input=[((1,4,64,64),)])
@@ -455,13 +455,13 @@ of the pipeline, it will be better to convert them to separate models.
         cleanup_torchscript_cache()
         gc.collect();
         print(f'VAE decoder successfully converted to IR and saved to {ir_path}')
-
-
+    
+    
     if not VAE_DECODER_OV_PATH.exists():
         convert_vae_decoder(vae, VAE_DECODER_OV_PATH)
     else:
         print(f"VAE decoder will be loaded from {VAE_DECODER_OV_PATH}")
-
+    
     del vae
     gc.collect();
 
@@ -538,7 +538,7 @@ one with or without the other. More explanation of how it works can be
 found in this
 `article `__.
 
-**NOTE**: negative prompting applicable only for high guidance scale (at
+**Note**: negative prompting is applicable only for a high guidance scale (at
 least > 1).
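
To make the guidance-scale condition concrete, here is a toy version of the
classifier-free guidance update used later in the pipeline code (the numbers
are made up; only the formula matches the pipeline):

.. code:: ipython3

    import numpy as np

    # Stand-ins for the UNet outputs on the unconditional (negative-prompt)
    # and text-conditioned inputs.
    noise_pred_uncond = np.array([0.10, 0.20])
    noise_pred_text = np.array([0.30, 0.05])
    guidance_scale = 7.5

    # Move away from the unconditional prediction towards the text-conditioned one.
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    print(noise_pred)  # [ 1.6   -0.925]

    # At guidance_scale == 1 the unconditional term cancels out entirely,
    # which is why negative prompting only has an effect for guidance_scale > 1.
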
 
 Strength for controlling Image-to-Image generation
@@ -572,21 +572,21 @@ between 0.4 and 0.6.
 
     import inspect
     from typing import List, Optional, Union, Dict
-
+    
     import PIL
     import cv2
-
+    
     from transformers import CLIPTokenizer
     from diffusers.pipelines.pipeline_utils import DiffusionPipeline
     from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
     from openvino.runtime import Model
-
-
+    
+    
     def scale_fit_to_window(dst_width:int, dst_height:int, image_width:int, image_height:int):
         """
-        Preprocessing helper function for calculating image size for resize with peserving original aspect ratio
+        Preprocessing helper function for calculating image size for resize with preserving original aspect ratio 
         and fitting image to specific window size
-
+        
         Parameters:
           dst_width (int): destination window width
           dst_height (int): destination window height
@@ -598,15 +598,15 @@ between 0.4 and 0.6.
         """
         im_scale = min(dst_height / image_height, dst_width / image_width)
         return int(im_scale * image_width), int(im_scale * image_height)
-
-
+    
+    
     def preprocess(image: PIL.Image.Image):
         """
         Image preprocessing function. Takes image in PIL.Image format, resizes it to keep aspect ratio and fit the model input window 512x512,
         then converts it to np.ndarray and adds padding with zeros on the right or bottom side of the image (depending on aspect ratio), after that
         converts data to float32 data type and changes the range of values from [0, 255] to [-1, 1], finally converts the data layout from planar NHWC to NCHW.
         The function returns preprocessed input tensor and padding size, which can be used in postprocessing.
-
+        
         Parameters:
           image (PIL.Image.Image): input image
         Returns:
@@ -625,8 +625,8 @@ between 0.4 and 0.6.
         image = 2.0 * image - 1.0
         image = image.transpose(0, 3, 1, 2)
         return image, {"padding": pad, "src_width": src_width, "src_height": src_height}
-
-
+    
+    
     class OVStableDiffusionPipeline(DiffusionPipeline):
         def __init__(
             self,
@@ -666,7 +666,7 @@ between 0.4 and 0.6.
             self.height = 512
             self.width = 512
             self.tokenizer = tokenizer
-
+    
         def __call__(
             self,
             prompt: Union[str, List[str]],
@@ -709,31 +709,31 @@ between 0.4 and 0.6.
                 gif (bool, *optional*, False):
                     Flag for storing all steps results or not.
             Returns:
-                Dictionary with keys:
+                Dictionary with keys: 
                     sample - the last generated image PIL.Image.Image or np.array
                     iterations - *optional* (if gif=True) images for all diffusion steps, List of PIL.Image.Image or np.array.
             """
             if seed is not None:
                 np.random.seed(seed)
-
+    
             img_buffer = []
             do_classifier_free_guidance = guidance_scale > 1.0
             # get prompt text embeddings
             text_embeddings = self._encode_prompt(prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt)
-
+            
             # set timesteps
             accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
             extra_set_kwargs = {}
             if accepts_offset:
                 extra_set_kwargs["offset"] = 1
-
+    
             self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
             timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength)
             latent_timestep = timesteps[:1]
-
+    
             # get the initial random noise unless the user supplied it
             latents, meta = self.prepare_latents(image, latent_timestep)
-
+    
             # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
             # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
             # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
@@ -742,36 +742,36 @@ between 0.4 and 0.6.
             extra_step_kwargs = {}
             if accepts_eta:
                 extra_step_kwargs["eta"] = eta
-
+    
             for i, t in enumerate(self.progress_bar(timesteps)):
                 # expand the latents if you are doing classifier free guidance
                 latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents
                 latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
+    
                 # predict the noise residual
                 noise_pred = self.unet([latent_model_input, t, text_embeddings])[self._unet_output]
                 # perform guidance
                 if do_classifier_free_guidance:
                     noise_pred_uncond, noise_pred_text = noise_pred[0], noise_pred[1]
                     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
+    
                 # compute the previous noisy sample x_t -> x_t-1
                 latents = self.scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs)["prev_sample"].numpy()
                 if gif:
                     image = self.vae_decoder(latents * (1 / 0.18215))[self._vae_d_output]
                     image = self.postprocess_image(image, meta, output_type)
                     img_buffer.extend(image)
-
+    
             # scale and decode the image latents with vae
             image = self.vae_decoder(latents * (1 / 0.18215))[self._vae_d_output]
-
+    
             image = self.postprocess_image(image, meta, output_type)
             return {"sample": image, 'iterations': img_buffer}
-
+        
         def _encode_prompt(self, prompt:Union[str, List[str]], num_images_per_prompt:int = 1, do_classifier_free_guidance:bool = True, negative_prompt:Union[str, List[str]] = None):
             """
             Encodes the prompt into text encoder hidden states.
-
+    
             Parameters:
                 prompt (str or list(str)): prompt to be encoded
                 num_images_per_prompt (int): number of images that should be generated per prompt
@@ -781,7 +781,7 @@ between 0.4 and 0.6.
                 text_embeddings (np.ndarray): text encoder hidden states
             """
             batch_size = len(prompt) if isinstance(prompt, list) else 1
-
+    
             # tokenize input prompts
             text_inputs = self.tokenizer(
                 prompt,
@@ -791,10 +791,10 @@ between 0.4 and 0.6.
                 return_tensors="np",
             )
             text_input_ids = text_inputs.input_ids
-
+    
             text_embeddings = self.text_encoder(
                 text_input_ids)[self._text_encoder_output]
-
+    
             # duplicate text embeddings for each generation per prompt
             if num_images_per_prompt != 1:
                 bs_embed, seq_len, _ = text_embeddings.shape
@@ -802,7 +802,7 @@ between 0.4 and 0.6.
                     text_embeddings, (1, num_images_per_prompt, 1))
                 text_embeddings = np.reshape(
                     text_embeddings, (bs_embed * num_images_per_prompt, seq_len, -1))
-
+    
             # get unconditional embeddings for classifier free guidance
             if do_classifier_free_guidance:
                 uncond_tokens: List[str]
@@ -820,26 +820,26 @@ between 0.4 and 0.6.
                     truncation=True,
                     return_tensors="np",
                 )
-
+    
                 uncond_embeddings = self.text_encoder(uncond_input.input_ids)[self._text_encoder_output]
-
+    
                 # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                 seq_len = uncond_embeddings.shape[1]
                 uncond_embeddings = np.tile(uncond_embeddings, (1, num_images_per_prompt, 1))
                 uncond_embeddings = np.reshape(uncond_embeddings, (batch_size * num_images_per_prompt, seq_len, -1))
-
+    
                 # For classifier free guidance, we need to do two forward passes.
                 # Here we concatenate the unconditional and text embeddings into a single batch
                 # to avoid doing two forward passes
                 text_embeddings = np.concatenate([uncond_embeddings, text_embeddings])
-
+    
             return text_embeddings
-
-
+    
+    
         def prepare_latents(self, image:PIL.Image.Image = None, latent_timestep:torch.Tensor = None):
             """
             Function for getting initial latents for starting generation
-
+            
             Parameters:
                 image (PIL.Image.Image, *optional*, None):
                     Input image for generation, if not provided randon noise will be used as starting point
@@ -860,12 +860,12 @@ between 0.4 and 0.6.
             latents = self.vae_encoder(input_image)[self._vae_e_output] * 0.18215
             latents = self.scheduler.add_noise(torch.from_numpy(latents), torch.from_numpy(noise), latent_timestep).numpy()
             return latents, meta
-
+    
         def postprocess_image(self, image:np.ndarray, meta:Dict, output_type:str = "pil"):
             """
-            Postprocessing for decoded image. Takes generated image decoded by VAE decoder, unpad it to initila image size (if required),
+            Postprocessing for decoded image. Takes generated image decoded by VAE decoder, unpads it to the initial image size (if required), 
             normalizes and converts it to the [0, 255] pixel range. Optionally, converts it from np.ndarray to PIL.Image format
-
+            
             Parameters:
                 image (np.ndarray):
                     Generated image
@@ -899,26 +899,26 @@ between 0.4 and 0.6.
                     image = [cv2.resize(img, (orig_width, orig_width))
                              for img in image]
             return image
-
+    
         def get_timesteps(self, num_inference_steps:int, strength:float):
             """
             Helper function for getting scheduler timesteps for generation
             In case of image-to-image generation, it updates number of steps according to strength
-
+            
             Parameters:
                num_inference_steps (int):
                   number of inference steps for generation
                strength (float):
-                   value between 0.0 and 1.0, that controls the amount of noise that is added to the input image.
+                   value between 0.0 and 1.0, that controls the amount of noise that is added to the input image. 
                    Values that approach 1.0 enable lots of variations but will also produce images that are not semantically consistent with the input.
             """
             # get the original timestep using init_timestep
             init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
-
+    
             t_start = max(num_inference_steps - init_timestep, 0)
             timesteps = self.scheduler.timesteps[t_start:]
-
-            return timesteps, num_inference_steps - t_start
+    
+            return timesteps, num_inference_steps - t_start 
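
As a quick worked example for ``get_timesteps`` above (plain arithmetic, not
notebook output): with 15 inference steps and a strength of 0.6, only the last
9 scheduler timesteps are actually run for image-to-image generation.

.. code:: ipython3

    # strength controls how much of the denoising schedule is re-run
    # when starting from an input image.
    num_inference_steps, strength = 15, 0.6
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 9
    t_start = max(num_inference_steps - init_timestep, 0)                          # 6
    print(t_start, num_inference_steps - t_start)  # 6 9
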
 
 Configure Inference Pipeline
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -936,14 +936,14 @@ inference using OpenVINO.
 .. code:: ipython3
 
     import ipywidgets as widgets
-
+    
     device = widgets.Dropdown(
         options=core.available_devices + ["AUTO"],
         value='CPU',
         description='Device:',
         disabled=False,
     )
-
+    
     device
 
 .. code:: ipython3
@@ -957,7 +957,7 @@ inference using OpenVINO.
 .. code:: ipython3
 
     ov_vae_config = {"INFERENCE_PRECISION_HINT": "f32"} if device.value != "CPU" else {}
-
+    
     vae_decoder = core.compile_model(VAE_DECODER_OV_PATH, device.value, ov_vae_config)
     vae_encoder = core.compile_model(VAE_ENCODER_OV_PATH, device.value, ov_vae_config)
 
@@ -968,19 +968,19 @@ Let us define them and put all components together
 
     from transformers import AutoTokenizer
     from diffusers import DDIMScheduler
-
+    
     if not tokenizer_dir.exists():
         tokenizer = AutoTokenizer.from_pretrained(checkpoint, subfolder='tokenizer')
         tokenizer.save_pretrained(tokenizer_dir)
     else:
         tokenizer = AutoTokenizer.from_pretrained(tokenizer_dir)
-
+    
     if not scheduler_config_dir.exists():
         scheduler = DDIMScheduler.from_pretrained(checkpoint, subfolder="scheduler")
         scheduler.save_pretrained(scheduler_config_dir)
     else:
         scheduler = DDIMScheduler.from_pretrained(scheduler_config_dir)
-
+    
     ov_pipe = OVStableDiffusionPipeline(
         tokenizer=tokenizer,
         text_encoder=text_enc,
@@ -1067,7 +1067,7 @@ diffusion models can be used to “enhance” an image.
     guidance_scale = 7.5
     num_i2i_steps = 15
     seed_i2i = seed
-
+    
     image = load_image(default_image_url)
     print('Pipeline settings')
     print(f'Input text: {text_i2i_prompt}')
@@ -1156,7 +1156,7 @@ improve model inference speed.
         description='Quantization',
         disabled=False,
     )
-
+    
     to_quantize
 
 
@@ -1175,9 +1175,9 @@ Let’s load ``skip magic`` extension to skip quantization if
 
     import sys
     sys.path.append("../utils")
-
+    
     int8_pipe = None
-
+    
     %load_ext skip_kernel_extension
 
 Prepare calibration dataset
@@ -1193,13 +1193,13 @@ model inputs for calibration we should customize ``CompiledModel``.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     class CompiledModelDecorator(ov.CompiledModel):
         def __init__(self, compiled_model, prob=0.5):
             super().__init__(compiled_model)
             self.data_cache = []
             self.prob = np.clip(prob, 0, 1)
-
+    
         def __call__(self, *args, **kwargs):
             if np.random.rand() >= self.prob:
                 self.data_cache.append(*args)
@@ -1208,21 +1208,21 @@ model inputs for calibration we should customize ``CompiledModel``.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     import datasets
     from tqdm.notebook import tqdm
     from transformers import set_seed
     from typing import Any, Dict, List
-
+    
     set_seed(1)
-
+    
     def collect_calibration_data(pipeline: OVStableDiffusionPipeline, subset_size: int) -> List[Dict]:
         original_unet = pipeline.unet
         pipeline.unet = CompiledModelDecorator(original_unet, prob=0.3)
         pipeline.set_progress_bar_config(disable=True)
-
+    
         dataset = datasets.load_dataset("conceptual_captions", split="train", streaming=True).shuffle(seed=42)
-
+    
         pbar = tqdm(total=subset_size)
         for batch in dataset:
             prompt = batch["caption"]
@@ -1234,7 +1234,7 @@ model inputs for calibration we should customize ``CompiledModel``.
                 pbar.update(subset_size - pbar.n)
                 break
             pbar.update(collected_subset_size - pbar.n)
-
+    
         calibration_dataset = pipeline.unet.data_cache
         pipeline.set_progress_bar_config(disable=False)
         pipeline.unet = original_unet
@@ -1243,9 +1243,9 @@ model inputs for calibration we should customize ``CompiledModel``.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     UNET_INT8_OV_PATH = Path('model/unet_nas_int8.xml')
-
+    
     if not UNET_INT8_OV_PATH.exists():
         subset_size = 300
         unet_calibration_data = collect_calibration_data(ov_pipe, subset_size=subset_size)
@@ -1263,17 +1263,16 @@ Create a quantized model from the pre-trained converted OpenVINO model.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     import nncf
-
+    
     UNET_INT8_OV_PATH = Path('model/unet_nas_int8.xml')
-
+    
     if not UNET_INT8_OV_PATH.exists():
         unet = core.read_model(UNET_OV_PATH)
         quantized_unet = nncf.quantize(
             model=unet,
             subset_size=subset_size,
-            preset=nncf.QuantizationPreset.MIXED,
             calibration_dataset=nncf.Dataset(unet_calibration_data),
             model_type=nncf.ModelType.TRANSFORMER,
             # Smooth Quant algorithm reduces activation quantization error; optimal alpha value was obtained through grid search
@@ -1292,9 +1291,9 @@ Create a quantized model from the pre-trained converted OpenVINO model.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     unet_optimized = core.compile_model(UNET_INT8_OV_PATH, device.value)
-
+    
     int8_pipe = OVStableDiffusionPipeline(
         tokenizer=tokenizer,
         text_encoder=text_enc,
@@ -1310,14 +1309,14 @@ data.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     import matplotlib.pyplot as plt
     from PIL import Image
-
+    
     def visualize_results(orig_img:Image.Image, optimized_img:Image.Image):
         """
         Helper function for results visualization
-
+    
         Parameters:
            orig_img (Image.Image): generated image using FP16 models
            optimized_img (Image.Image): generated image using quantized models
@@ -1339,7 +1338,7 @@ data.
         list_axes[1].imshow(np.array(optimized_img))
         list_axes[0].set_title(orig_title, fontsize=15)
         list_axes[1].set_title(control_title, fontsize=15)
-
+    
         fig.subplots_adjust(wspace=0.01, hspace=0.01)
         fig.tight_layout()
         return fig
@@ -1349,7 +1348,7 @@ Text-to-Image generation
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     fp16_image = ov_pipe(text_prompt, num_inference_steps=num_steps, seed=seed)['sample'][0]
     int8_image = int8_pipe(text_prompt, num_inference_steps=num_steps, seed=seed)['sample'][0]
     fig = visualize_results(fp16_image, int8_image)
@@ -1376,7 +1375,7 @@ Image-to-Image generation
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     fp16_text_i2i = ov_pipe(text_i2i_prompt, image, guidance_scale=guidance_scale, strength=strength, num_inference_steps=num_i2i_steps, seed=seed_i2i)['sample'][0]
     int8_text_i2i = int8_pipe(text_i2i_prompt, image, guidance_scale=guidance_scale, strength=strength, num_inference_steps=num_i2i_steps, seed=seed_i2i)['sample'][0]
     fig = visualize_results(fp16_text_i2i, int8_text_i2i)
@@ -1413,9 +1412,9 @@ pipelines, we use median inference time on calibration subset.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     import time
-
+    
     validation_size = 10
     calibration_dataset = datasets.load_dataset("conceptual_captions", split="train", streaming=True)
     validation_data = []
@@ -1424,7 +1423,7 @@ pipelines, we use median inference time on calibration subset.
             break
         prompt = batch["caption"]
         validation_data.append(prompt)
-
+    
     def calculate_inference_time(pipeline, calibration_dataset):
         inference_time = []
         pipeline.set_progress_bar_config(disable=True)
@@ -1441,7 +1440,7 @@ pipelines, we use median inference time on calibration subset.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     fp_latency = calculate_inference_time(ov_pipe, validation_data)
     int8_latency = calculate_inference_time(int8_pipe, validation_data)
     print(f"Performance speed up: {fp_latency / int8_latency:.3f}")
@@ -1460,10 +1459,10 @@ Compare UNet file size
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     fp16_ir_model_size = UNET_OV_PATH.with_suffix(".bin").stat().st_size / 1024
     quantized_model_size = UNET_INT8_OV_PATH.with_suffix(".bin").stat().st_size / 1024
-
+    
     print(f"FP16 model size: {fp16_ir_model_size:.2f} KB")
     print(f"INT8 model size: {quantized_model_size:.2f} KB")
     print(f"Model compression rate: {fp16_ir_model_size / quantized_model_size:.3f}")
@@ -1487,13 +1486,13 @@ launch the interactive demo.
 .. code:: ipython3
 
     quantized_model_present = int8_pipe is not None
-
+    
     use_quantized_model = widgets.Checkbox(
         value=True if quantized_model_present else False,
         description='Use quantized model',
         disabled=not quantized_model_present,
     )
-
+    
     use_quantized_model
 
 
@@ -1508,22 +1507,22 @@ launch the interactive demo.
 .. code:: ipython3
 
     import gradio as gr
-
+    
     sample_img_url = "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/tower.jpg"
-
+    
     img = load_image(sample_img_url).save("tower.jpg")
     pipeline = int8_pipe if use_quantized_model.value else ov_pipe
-
+    
     def generate_from_text(text, negative_prompt, seed, num_steps, guidance_scale, _=gr.Progress(track_tqdm=True)):
         result = pipeline(text, negative_prompt=negative_prompt, num_inference_steps=num_steps, seed=seed, guidance_scale=guidance_scale)
         return result["sample"][0]
-
-
+    
+    
     def generate_from_image(img, text, negative_prompt, seed, num_steps, strength, guidance_scale, _=gr.Progress(track_tqdm=True)):
         result = pipeline(text, img, negative_prompt=negative_prompt, num_inference_steps=num_steps, seed=seed, strength=strength, guidance_scale=guidance_scale)
         return result["sample"][0]
-
-
+    
+    
     with gr.Blocks() as demo:
         with gr.Tab("Text-to-Image generation"):
             with gr.Row():
@@ -1560,9 +1559,9 @@ launch the interactive demo.
             gr.Examples(
                 [["tower.jpg", sample_i2i_text, "", 6400023, 30, 0.6, 5]],
                 [i2i_input, i2i_text_input, i2i_neg_text_input, i2i_seed_input, i2i_steps_input, strength_input, i2i_guidance_scale],
-
+                
             )
-
+    
     try:
         demo.queue().launch(debug=False)
     except Exception:
diff --git a/docs/notebooks/260-pix2struct-docvqa-with-output.rst b/docs/notebooks/260-pix2struct-docvqa-with-output.rst
index c5fe788b323a97..46ee3232f36143 100644
--- a/docs/notebooks/260-pix2struct-docvqa-with-output.rst
+++ b/docs/notebooks/260-pix2struct-docvqa-with-output.rst
@@ -100,7 +100,7 @@ documentation `__.
 .. code:: ipython3
 
     %pip install -q torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
-    %pip install -q "git+https://github.com/huggingface/optimum-intel.git" "openvino>=2023.1.0" "transformers>=4.33.0" onnx gradio --extra-index-url https://download.pytorch.org/whl/cpu
+    %pip install -q "git+https://github.com/huggingface/optimum-intel.git" "openvino>=2023.1.0" "transformers>=4.33.0" "peft==0.6.2" onnx gradio --extra-index-url https://download.pytorch.org/whl/cpu
 
 Download and Convert Model
 --------------------------
@@ -136,10 +136,10 @@ applicable for other models from pix2struct family.
     import gc
     from pathlib import Path
     from optimum.intel.openvino import OVModelForPix2Struct
-
+    
     model_id = "google/pix2struct-docvqa-base"
     model_dir = Path(model_id.split('/')[-1])
-
+    
     if not model_dir.exists():
         ov_model = OVModelForPix2Struct.from_pretrained(model_id, export=True, compile=False)
         ov_model.half()
@@ -175,16 +175,16 @@ select device from dropdown list for running inference using OpenVINO
 
     import ipywidgets as widgets
     import openvino as ov
-
+    
     core = ov.Core()
-
+    
     device = widgets.Dropdown(
         options=[d for d in core.available_devices if "GPU" not in d] + ["AUTO"],
         value='AUTO',
         description='Device:',
         disabled=False,
     )
-
+    
     device
 
 
@@ -217,7 +217,7 @@ by ``Pix2StructProcessor.decode``
 .. code:: ipython3
 
     from transformers import Pix2StructProcessor
-
+    
     processor = Pix2StructProcessor.from_pretrained(model_id)
     ov_model = OVModelForPix2Struct.from_pretrained(model_dir, device=device.value)
 
 @@ -238,18 +238,18 @@ documentation 
-    %pip install -q "openvino-dev>=2023.1.0"
+    %pip install -q "openvino-dev>=2024.0.0"
     %pip install -q "nncf>=2.6.0"
     %pip install -q "gradio>=4.13"
 
@@ -178,1873 +178,192 @@ model and generate a segmentation map.
 .. parsed-literal::
 
 
-  0%|          | 144k/138M [00:00<01:38, 1.47MB/s]
+  0%|          | 304k/138M [00:00<00:46, 3.09MB/s]
 
 .. parsed-literal::
 
 
-  0%|          | 496k/138M [00:00<00:53, 2.70MB/s]
+  2%|▏         | 2.25M/138M [00:00<00:10, 13.2MB/s]
 
 .. parsed-literal::
 
 
-  1%|          | 880k/138M [00:00<00:44, 3.21MB/s]
+  5%|▌         | 7.08M/138M [00:00<00:04, 29.2MB/s]
 
 .. parsed-literal::
 
 
-  1%|          | 1.23M/138M [00:00<00:41, 3.43MB/s]
+  9%|▊         | 11.9M/138M [00:00<00:03, 37.5MB/s]
 
 .. parsed-literal::
 
 
-  1%|          | 1.61M/138M [00:00<00:40, 3.54MB/s]
+ 12%|█▏        | 16.8M/138M [00:00<00:02, 42.5MB/s]
 
 .. parsed-literal::
 
 
-  1%|▏         | 1.97M/138M [00:00<00:39, 3.62MB/s]
+ 16%|█▌        | 21.6M/138M [00:00<00:02, 44.9MB/s]
 
 .. parsed-literal::
 
 
-  2%|▏         | 2.34M/138M [00:00<00:38, 3.68MB/s]
+ 19%|█▉        | 26.7M/138M [00:00<00:02, 47.7MB/s]
 
 .. parsed-literal::
 
 
-  2%|▏         | 2.72M/138M [00:00<00:38, 3.72MB/s]
+ 23%|██▎       | 31.4M/138M [00:00<00:02, 48.1MB/s]
 
 .. parsed-literal::
 
 
-  2%|▏         | 3.09M/138M [00:00<00:38, 3.73MB/s]
+ 26%|██▌       | 36.0M/138M [00:00<00:02, 47.2MB/s]
 
 .. parsed-literal::
 
 
-  3%|▎         | 3.47M/138M [00:01<00:37, 3.73MB/s]
+ 29%|██▉       | 40.5M/138M [00:01<00:02, 46.6MB/s]
 
 .. parsed-literal::
 
 
-  3%|▎         | 3.84M/138M [00:01<00:37, 3.76MB/s]
+ 33%|███▎      | 45.1M/138M [00:01<00:02, 47.0MB/s]
 
 .. parsed-literal::
 
 
-  3%|▎         | 4.22M/138M [00:01<00:37, 3.75MB/s]
+ 36%|███▌      | 49.6M/138M [00:01<00:01, 46.7MB/s]
 
 .. parsed-literal::
 
 
-  3%|▎         | 4.59M/138M [00:01<00:37, 3.77MB/s]
+ 39%|███▉      | 54.0M/138M [00:01<00:01, 44.5MB/s]
 
 .. parsed-literal::
 
 
-  4%|▎         | 4.97M/138M [00:01<00:37, 3.76MB/s]
+ 42%|████▏     | 58.3M/138M [00:01<00:01, 42.9MB/s]
 
 .. parsed-literal::
 
 
-  4%|▍         | 5.34M/138M [00:01<00:36, 3.77MB/s]
+ 45%|████▌     | 62.8M/138M [00:01<00:01, 44.1MB/s]
 
 .. parsed-literal::
 
 
-  4%|▍         | 5.72M/138M [00:01<00:36, 3.77MB/s]
+ 49%|████▊     | 67.1M/138M [00:01<00:01, 44.0MB/s]
 
 .. parsed-literal::
 
 
-  4%|▍         | 6.09M/138M [00:01<00:36, 3.79MB/s]
+ 52%|█████▏    | 71.5M/138M [00:01<00:01, 44.7MB/s]
 
 .. parsed-literal::
 
 
-  5%|▍         | 6.45M/138M [00:01<00:36, 3.76MB/s]
+ 55%|█████▍    | 75.8M/138M [00:01<00:01, 44.1MB/s]
 
 .. parsed-literal::
 
 
-  5%|▍         | 6.83M/138M [00:01<00:36, 3.77MB/s]
+ 58%|█████▊    | 80.3M/138M [00:01<00:01, 45.2MB/s]
 
 .. parsed-literal::
 
 
-  5%|▌         | 7.20M/138M [00:02<00:36, 3.76MB/s]
+ 61%|██████    | 84.7M/138M [00:02<00:01, 44.7MB/s]
 
 .. parsed-literal::
 
 
-  5%|▌         | 7.58M/138M [00:02<00:36, 3.77MB/s]
+ 65%|██████▍   | 89.3M/138M [00:02<00:01, 45.9MB/s]
 
 .. parsed-literal::
 
 
-  6%|▌         | 7.95M/138M [00:02<00:36, 3.79MB/s]
+ 68%|██████▊   | 93.7M/138M [00:02<00:01, 44.3MB/s]
 
 .. parsed-literal::
 
 
-  6%|▌         | 8.31M/138M [00:02<00:36, 3.75MB/s]
+ 71%|███████▏  | 98.6M/138M [00:02<00:00, 46.3MB/s]
 
 .. parsed-literal::
 
 
-  6%|▋         | 8.69M/138M [00:02<00:36, 3.77MB/s]
+ 75%|███████▍  | 103M/138M [00:02<00:00, 45.7MB/s]
 
 .. parsed-literal::
 
 
-  7%|▋         | 9.06M/138M [00:02<00:36, 3.76MB/s]
+ 78%|███████▊  | 107M/138M [00:02<00:00, 43.3MB/s]
 
 .. parsed-literal::
 
 
-  7%|▋         | 9.44M/138M [00:02<00:35, 3.77MB/s]
+ 81%|████████  | 112M/138M [00:02<00:00, 43.7MB/s]
 
 .. parsed-literal::
 
 
-  7%|▋         | 9.81M/138M [00:02<00:35, 3.78MB/s]
+ 84%|████████▍ | 116M/138M [00:02<00:00, 42.3MB/s]
 
 .. parsed-literal::
 
 
-  7%|▋         | 10.2M/138M [00:02<00:35, 3.80MB/s]
+ 87%|████████▋ | 120M/138M [00:02<00:00, 40.6MB/s]
 
 .. parsed-literal::
 
 
-  8%|▊         | 10.5M/138M [00:02<00:35, 3.76MB/s]
+ 90%|████████▉ | 124M/138M [00:03<00:00, 41.7MB/s]
 
 .. parsed-literal::
 
 
-  8%|▊         | 10.9M/138M [00:03<00:35, 3.76MB/s]
+ 93%|█████████▎| 128M/138M [00:03<00:00, 42.5MB/s]
 
 .. parsed-literal::
 
 
-  8%|▊         | 11.3M/138M [00:03<00:35, 3.77MB/s]
+ 96%|█████████▌| 133M/138M [00:03<00:00, 43.0MB/s]
 
 .. parsed-literal::
 
 
-  8%|▊         | 11.7M/138M [00:03<00:35, 3.76MB/s]
+ 99%|█████████▉| 137M/138M [00:03<00:00, 43.4MB/s]
 
 .. parsed-literal::
 
 
-  9%|▊         | 12.0M/138M [00:03<00:35, 3.76MB/s]
+   100%|██████████| 138M/138M [00:03<00:00, 43.1MB/s]
 
-
-.. parsed-literal::
-
-
- 53%|█████▎    | 73.0M/138M [00:20<00:18, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 53%|█████▎    | 73.4M/138M [00:20<00:18, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 53%|█████▎    | 73.8M/138M [00:20<00:17, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 54%|█████▎    | 74.2M/138M [00:20<00:17, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 54%|█████▍    | 74.5M/138M [00:20<00:17, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 54%|█████▍    | 74.9M/138M [00:20<00:17, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 54%|█████▍    | 75.3M/138M [00:21<00:17, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 55%|█████▍    | 75.7M/138M [00:21<00:17, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 55%|█████▌    | 76.0M/138M [00:21<00:17, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 55%|█████▌    | 76.4M/138M [00:21<00:17, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 56%|█████▌    | 76.8M/138M [00:21<00:17, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 56%|█████▌    | 77.2M/138M [00:21<00:16, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 56%|█████▌    | 77.5M/138M [00:21<00:16, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 56%|█████▋    | 77.9M/138M [00:21<00:16, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 57%|█████▋    | 78.3M/138M [00:21<00:16, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 57%|█████▋    | 78.7M/138M [00:21<00:16, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 57%|█████▋    | 79.0M/138M [00:22<00:16, 3.79MB/s]
-
-.. parsed-literal::
-
-
- 57%|█████▋    | 79.4M/138M [00:22<00:16, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 58%|█████▊    | 79.8M/138M [00:22<00:16, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 58%|█████▊    | 80.2M/138M [00:22<00:16, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 58%|█████▊    | 80.5M/138M [00:22<00:16, 3.74MB/s]
-
-.. parsed-literal::
-
-
- 59%|█████▊    | 80.9M/138M [00:22<00:15, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 59%|█████▉    | 81.3M/138M [00:22<00:15, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 59%|█████▉    | 81.7M/138M [00:22<00:15, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 59%|█████▉    | 82.0M/138M [00:22<00:15, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 60%|█████▉    | 82.4M/138M [00:23<00:15, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 60%|█████▉    | 82.8M/138M [00:23<00:15, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 60%|██████    | 83.1M/138M [00:23<00:15, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 60%|██████    | 83.5M/138M [00:23<00:15, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 61%|██████    | 83.9M/138M [00:23<00:15, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 61%|██████    | 84.3M/138M [00:23<00:15, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 61%|██████    | 84.6M/138M [00:23<00:14, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 61%|██████▏   | 85.0M/138M [00:23<00:14, 3.79MB/s]
-
-.. parsed-literal::
-
-
- 62%|██████▏   | 85.4M/138M [00:23<00:14, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 62%|██████▏   | 85.8M/138M [00:23<00:14, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 62%|██████▏   | 86.1M/138M [00:24<00:14, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 63%|██████▎   | 86.5M/138M [00:24<00:14, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 63%|██████▎   | 86.9M/138M [00:24<00:14, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 63%|██████▎   | 87.2M/138M [00:24<00:14, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 63%|██████▎   | 87.6M/138M [00:24<00:14, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 64%|██████▎   | 88.0M/138M [00:24<00:13, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 64%|██████▍   | 88.4M/138M [00:24<00:13, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 64%|██████▍   | 88.7M/138M [00:24<00:13, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 64%|██████▍   | 89.1M/138M [00:24<00:13, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 65%|██████▍   | 89.5M/138M [00:24<00:13, 3.80MB/s]
-
-.. parsed-literal::
-
-
- 65%|██████▌   | 89.9M/138M [00:25<00:13, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 65%|██████▌   | 90.2M/138M [00:25<00:13, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 66%|██████▌   | 90.6M/138M [00:25<00:13, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 66%|██████▌   | 91.0M/138M [00:25<00:13, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 66%|██████▌   | 91.3M/138M [00:25<00:13, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 66%|██████▋   | 91.7M/138M [00:25<00:12, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 67%|██████▋   | 92.1M/138M [00:25<00:12, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 67%|██████▋   | 92.5M/138M [00:25<00:12, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 67%|██████▋   | 92.8M/138M [00:25<00:12, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 67%|██████▋   | 93.2M/138M [00:26<00:12, 3.79MB/s]
-
-.. parsed-literal::
-
-
- 68%|██████▊   | 93.6M/138M [00:26<00:12, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 68%|██████▊   | 94.0M/138M [00:26<00:12, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 68%|██████▊   | 94.3M/138M [00:26<00:12, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 69%|██████▊   | 94.7M/138M [00:26<00:12, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 69%|██████▉   | 95.1M/138M [00:26<00:12, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 69%|██████▉   | 95.5M/138M [00:26<00:11, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 69%|██████▉   | 95.8M/138M [00:26<00:11, 3.74MB/s]
-
-.. parsed-literal::
-
-
- 70%|██████▉   | 96.2M/138M [00:26<00:11, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 70%|██████▉   | 96.6M/138M [00:26<00:11, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 70%|███████   | 97.0M/138M [00:27<00:11, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 70%|███████   | 97.3M/138M [00:27<00:11, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 71%|███████   | 97.7M/138M [00:27<00:11, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 71%|███████   | 98.1M/138M [00:27<00:11, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 71%|███████   | 98.5M/138M [00:27<00:11, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 71%|███████▏  | 98.8M/138M [00:27<00:11, 3.73MB/s]
-
-.. parsed-literal::
-
-
- 72%|███████▏  | 99.2M/138M [00:27<00:10, 3.73MB/s]
-
-.. parsed-literal::
-
-
- 72%|███████▏  | 99.6M/138M [00:27<00:10, 3.74MB/s]
-
-.. parsed-literal::
-
-
- 72%|███████▏  | 99.9M/138M [00:27<00:10, 3.74MB/s]
-
-.. parsed-literal::
-
-
- 73%|███████▎  | 100M/138M [00:28<00:10, 3.74MB/s]
-
-.. parsed-literal::
-
-
- 73%|███████▎  | 101M/138M [00:28<00:10, 3.74MB/s]
-
-.. parsed-literal::
-
-
- 73%|███████▎  | 101M/138M [00:28<00:10, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 73%|███████▎  | 101M/138M [00:28<00:10, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 74%|███████▎  | 102M/138M [00:28<00:10, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 74%|███████▍  | 102M/138M [00:28<00:10, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 74%|███████▍  | 103M/138M [00:28<00:09, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 74%|███████▍  | 103M/138M [00:28<00:09, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 75%|███████▍  | 103M/138M [00:28<00:09, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 75%|███████▌  | 104M/138M [00:28<00:09, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 75%|███████▌  | 104M/138M [00:29<00:09, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 76%|███████▌  | 104M/138M [00:29<00:09, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 76%|███████▌  | 105M/138M [00:29<00:09, 3.74MB/s]
-
-.. parsed-literal::
-
-
- 76%|███████▌  | 105M/138M [00:29<00:09, 3.74MB/s]
-
-.. parsed-literal::
-
-
- 76%|███████▋  | 106M/138M [00:29<00:09, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 77%|███████▋  | 106M/138M [00:29<00:08, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 77%|███████▋  | 106M/138M [00:29<00:08, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 77%|███████▋  | 107M/138M [00:29<00:08, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 77%|███████▋  | 107M/138M [00:29<00:08, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 78%|███████▊  | 107M/138M [00:29<00:08, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 78%|███████▊  | 108M/138M [00:30<00:08, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 78%|███████▊  | 108M/138M [00:30<00:08, 3.79MB/s]
-
-.. parsed-literal::
-
-
- 79%|███████▊  | 109M/138M [00:30<00:08, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 79%|███████▉  | 109M/138M [00:30<00:08, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 79%|███████▉  | 109M/138M [00:30<00:08, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 79%|███████▉  | 110M/138M [00:30<00:07, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 80%|███████▉  | 110M/138M [00:30<00:07, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 80%|███████▉  | 110M/138M [00:30<00:07, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 80%|████████  | 111M/138M [00:30<00:07, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 80%|████████  | 111M/138M [00:31<00:07, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 81%|████████  | 112M/138M [00:31<00:07, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 81%|████████  | 112M/138M [00:31<00:07, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 81%|████████  | 112M/138M [00:31<00:07, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 81%|████████▏ | 113M/138M [00:31<00:07, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 82%|████████▏ | 113M/138M [00:31<00:07, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 82%|████████▏ | 113M/138M [00:31<00:06, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 82%|████████▏ | 114M/138M [00:31<00:06, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 83%|████████▎ | 114M/138M [00:31<00:06, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 83%|████████▎ | 115M/138M [00:31<00:06, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 83%|████████▎ | 115M/138M [00:32<00:06, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 83%|████████▎ | 115M/138M [00:32<00:06, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 84%|████████▎ | 116M/138M [00:32<00:06, 3.69MB/s]
-
-.. parsed-literal::
-
-
- 84%|████████▍ | 116M/138M [00:32<00:06, 3.72MB/s]
-
-.. parsed-literal::
-
-
- 84%|████████▍ | 116M/138M [00:32<00:06, 3.73MB/s]
-
-.. parsed-literal::
-
-
- 84%|████████▍ | 117M/138M [00:32<00:05, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 85%|████████▍ | 117M/138M [00:32<00:05, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 85%|████████▌ | 118M/138M [00:32<00:05, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 85%|████████▌ | 118M/138M [00:32<00:05, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 86%|████████▌ | 118M/138M [00:33<00:05, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 86%|████████▌ | 119M/138M [00:33<00:05, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 86%|████████▌ | 119M/138M [00:33<00:05, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 86%|████████▋ | 119M/138M [00:33<00:05, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 87%|████████▋ | 120M/138M [00:33<00:05, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 87%|████████▋ | 120M/138M [00:33<00:05, 3.79MB/s]
-
-.. parsed-literal::
-
-
- 87%|████████▋ | 120M/138M [00:33<00:04, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 87%|████████▋ | 121M/138M [00:33<00:04, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 88%|████████▊ | 121M/138M [00:33<00:04, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 88%|████████▊ | 122M/138M [00:33<00:04, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 88%|████████▊ | 122M/138M [00:34<00:04, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 89%|████████▊ | 122M/138M [00:34<00:04, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 89%|████████▉ | 123M/138M [00:34<00:04, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 89%|████████▉ | 123M/138M [00:34<00:04, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 89%|████████▉ | 123M/138M [00:34<00:04, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 90%|████████▉ | 124M/138M [00:34<00:04, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 90%|████████▉ | 124M/138M [00:34<00:03, 3.74MB/s]
-
-.. parsed-literal::
-
-
- 90%|█████████ | 125M/138M [00:34<00:03, 3.74MB/s]
-
-.. parsed-literal::
-
-
- 90%|█████████ | 125M/138M [00:34<00:03, 3.74MB/s]
-
-.. parsed-literal::
-
-
- 91%|█████████ | 125M/138M [00:34<00:03, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 91%|█████████ | 126M/138M [00:35<00:03, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 91%|█████████ | 126M/138M [00:35<00:03, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 92%|█████████▏| 126M/138M [00:35<00:03, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 92%|█████████▏| 127M/138M [00:35<00:03, 3.79MB/s]
-
-.. parsed-literal::
-
-
- 92%|█████████▏| 127M/138M [00:35<00:03, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 92%|█████████▏| 128M/138M [00:35<00:02, 3.79MB/s]
-
-.. parsed-literal::
-
-
- 93%|█████████▎| 128M/138M [00:35<00:02, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 93%|█████████▎| 128M/138M [00:35<00:02, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 93%|█████████▎| 129M/138M [00:35<00:02, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 93%|█████████▎| 129M/138M [00:36<00:02, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 94%|█████████▎| 129M/138M [00:36<00:02, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 94%|█████████▍| 130M/138M [00:36<00:02, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 94%|█████████▍| 130M/138M [00:36<00:02, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 94%|█████████▍| 131M/138M [00:36<00:02, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 95%|█████████▍| 131M/138M [00:36<00:02, 3.79MB/s]
-
-.. parsed-literal::
-
-
- 95%|█████████▌| 131M/138M [00:36<00:01, 3.78MB/s]
-
-.. parsed-literal::
-
-
- 95%|█████████▌| 132M/138M [00:36<00:01, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 96%|█████████▌| 132M/138M [00:36<00:01, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 96%|█████████▌| 132M/138M [00:36<00:01, 3.76MB/s]
-
-.. parsed-literal::
-
-
- 96%|█████████▌| 133M/138M [00:37<00:01, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 96%|█████████▋| 133M/138M [00:37<00:01, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 97%|█████████▋| 134M/138M [00:37<00:01, 3.79MB/s]
-
-.. parsed-literal::
-
-
- 97%|█████████▋| 134M/138M [00:37<00:01, 3.77MB/s]
-
-.. parsed-literal::
-
-
- 97%|█████████▋| 134M/138M [00:37<00:01, 3.74MB/s]
-
-.. parsed-literal::
-
-
- 97%|█████████▋| 135M/138M [00:37<00:00, 3.74MB/s]
-
-.. parsed-literal::
-
-
- 98%|█████████▊| 135M/138M [00:37<00:00, 3.72MB/s]
-
-.. parsed-literal::
-
-
- 98%|█████████▊| 135M/138M [00:37<00:00, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 98%|█████████▊| 136M/138M [00:37<00:00, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 99%|█████████▊| 136M/138M [00:38<00:00, 3.75MB/s]
-
-.. parsed-literal::
-
-
- 99%|█████████▉| 137M/138M [00:38<00:00, 3.74MB/s]
-
-.. parsed-literal::
-
-
- 99%|█████████▉| 137M/138M [00:38<00:00, 3.72MB/s]
-
-.. parsed-literal::
-
-
- 99%|█████████▉| 137M/138M [00:38<00:00, 3.72MB/s]
-
-.. parsed-literal::
 
 
-   100%|█████████▉| 138M/138M [00:38<00:00, 3.73MB/s]
 
-.. parsed-literal::
 
 
-   100%|█████████▉| 138M/138M [00:38<00:00, 3.74MB/s]
 
 .. parsed-literal::
 
+    coco_bike.jpg:   0%|          | 0.00/182k [00:00<?, ?B/s]
+`Core `__
 class provides access to the OpenVINO Runtime API. The ``core`` object,
 which is an instance of the ``Core`` class, represents the API and is
 used to compile the model.
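 
 A minimal sketch of that flow (illustrative only, not the exact cells of this
 notebook), assuming ``ov_model_path`` points to the converted IR file:
 
 .. code:: ipython3
 
     import openvino as ov
 
     core = ov.Core()  # entry point to the OpenVINO Runtime API
     ov_model = core.read_model(ov_model_path)  # read the converted IR from disk
     compiled_model = core.compile_model(ov_model, "AUTO")  # compile for an automatically selected device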
@@ -2150,14 +469,14 @@ from the dropdown list:
 
 .. code:: ipython3
 
-    DEVICE = widgets.Dropdown(
+    device = widgets.Dropdown(
         options=core.available_devices + ["AUTO"],
         value="AUTO",
         description="Device:",
         disabled=False,
     )
 
-    DEVICE
+    device
 
 
 
@@ -2208,22 +527,25 @@ pipeline.
 
 .. code:: ipython3
 
-    wrapped_model = OVWrapper(ov_model_path, device=DEVICE.value, stride=model.predictor.model.stride)
+    wrapped_model = OVWrapper(ov_model_path, device=device.value, stride=model.predictor.model.stride)
     model.predictor.model = wrapped_model
 
-    ov_results = model(image_uri, device=DEVICE.value, retina_masks=True, imgsz=640, conf=0.6, iou=0.9)
+    ov_results = model(image_uri, device=device.value, retina_masks=True, imgsz=640, conf=0.6, iou=0.9)
+
+
+
 
 
 
 
 .. parsed-literal::
 
-    image 1/1 /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/notebooks/261-fast-segment-anything/coco_bike.jpg: 480x640 33 objects, 310.8ms
+    image 1/1 /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/notebooks/261-fast-segment-anything/coco_bike.jpg: 480x640 33 objects, 299.4ms
 
 
 .. parsed-literal::
 
-    Speed: 3.3ms preprocess, 310.8ms inference, 26.3ms postprocess per image at shape (1, 3, 480, 640)
+    Speed: 2.4ms preprocess, 299.4ms inference, 25.3ms postprocess per image at shape (1, 3, 480, 640)
 
 
 One can observe the converted model outputs in the next cell; they are
@@ -2294,7 +616,7 @@ in the NNCF repo, refer there for more details. Moreover, you can check
 out other quantization tutorials in the `OV notebooks
 repo <230-yolov8-optimizati-with-output.html>`__.
 
-   **NOTE**: Model post-training quantization is time-consuming process.
+   **Note**: Model post-training quantization is a time-consuming process.
    Be patient, it can take several minutes depending on your hardware.
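 
 As a rough, hypothetical sketch of what a post-training quantization call with
 NNCF looks like (``calibration_items`` is a placeholder for the collected
 calibration inputs, not a name used in this notebook):
 
 .. code:: ipython3
 
     import nncf
     import openvino as ov
 
     core = ov.Core()
     ov_model = core.read_model(ov_model_path)
 
     # Wrap the collected inputs so NNCF can iterate over them during calibration
     calibration_dataset = nncf.Dataset(calibration_items)
 
     quantized_model = nncf.quantize(
         ov_model,
         calibration_dataset,
         subset_size=len(calibration_items),
         model_type=nncf.ModelType.TRANSFORMER,
     )
     ov.save_model(quantized_model, quantized_model_path)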
 
 .. code:: ipython3
@@ -2495,7 +817,9 @@ repo <230-yolov8-optimizati-with-output.html>`__.
 
 
 
+.. raw:: html
 
+    

 
 
 
@@ -2509,7 +833,7 @@ repo <230-yolov8-optimizati-with-output.html>`__.
 
 .. parsed-literal::
 
-    /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-609/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/nncf/experimental/tensor/tensor.py:84: RuntimeWarning: invalid value encountered in multiply
+    /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-632/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/nncf/experimental/tensor/tensor.py:84: RuntimeWarning: invalid value encountered in multiply
       return Tensor(self.data * unwrap_tensor_data(other))
 
 
@@ -2520,7 +844,9 @@ repo <230-yolov8-optimizati-with-output.html>`__.
 
 
 
+.. raw:: html
 
+    

 
 
 
@@ -2549,7 +875,7 @@ calibration dataset to measure the performance.
     coco_dataset = COCOLoader(OUT_DIR / 'coco128/images/train2017')
     calibration_dataset_size = 128
 
-    wrapped_model = OVWrapper(ov_model_path, device=DEVICE.value, stride=model.predictor.model.stride)
+    wrapped_model = OVWrapper(ov_model_path, device=device.value, stride=model.predictor.model.stride)
     model.predictor.model = wrapped_model
 
     start_time = datetime.datetime.now()
@@ -2576,7 +902,7 @@ calibration dataset to measure the performance.
 
     %%skip not $do_quantize.value
 
-    quantized_wrapped_model = OVWrapper(quantized_model_path, device=DEVICE.value, stride=model.predictor.model.stride)
+    quantized_wrapped_model = OVWrapper(quantized_model_path, device=device.value, stride=model.predictor.model.stride)
     model.predictor.model = quantized_wrapped_model
 
     start_time = datetime.datetime.now()
@@ -2760,7 +1086,7 @@ based on user input.
         image = image.resize((new_w, new_h))
 
         results = model(image,
-                        device=DEVICE.value,
+                        device=device.value,
                         retina_masks=use_retina,
                         iou=iou_threshold,
                         conf=conf_threshold,
@@ -2920,7 +1246,7 @@ based on user input.
 
 
 
-.. .. raw:: html
 
-..    
+ + diff --git a/docs/notebooks/263-latent-consistency-models-image-generation-with-output.rst b/docs/notebooks/263-latent-consistency-models-image-generation-with-output.rst index 859e699f13b3ef..3a679731380ebc 100644 --- a/docs/notebooks/263-latent-consistency-models-image-generation-with-output.rst +++ b/docs/notebooks/263-latent-consistency-models-image-generation-with-output.rst @@ -81,7 +81,7 @@ Prerequisites .. code:: ipython3 %pip install -q "torch" --index-url https://download.pytorch.org/whl/cpu - %pip install -q "openvino>=2023.1.0" transformers "diffusers>=0.23.1" pillow gradio "nncf>=2.6.0" datasets --extra-index-url https://download.pytorch.org/whl/cpu + %pip install -q "openvino>=2023.1.0" transformers "diffusers>=0.23.1" pillow gradio "nncf>=2.7.0" datasets "peft==0.6.2" --extra-index-url https://download.pytorch.org/whl/cpu Prepare models for OpenVINO format conversion --------------------------------------------- @@ -96,7 +96,7 @@ fine-tune of `Stable-Diffusion v1-5 `__ using Latent Consistency Distillation (LCD) approach discussed above. This model is also integrated into -`Diffusers `__ library. +`Diffusers `__ library. Diffusers is the go-to library for state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. This allows us to compare running original Stable Diffusion @@ -119,15 +119,15 @@ provide which module should be loaded for initialization using from pathlib import Path from diffusers import DiffusionPipeline import numpy as np - - + + warnings.filterwarnings("ignore") - + TEXT_ENCODER_OV_PATH = Path("model/text_encoder.xml") UNET_OV_PATH = Path("model/unet.xml") VAE_DECODER_OV_PATH = Path("model/vae_decoder.xml") - - + + def load_orginal_pytorch_pipeline_componets(skip_models=False, skip_safety_checker=False): pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7") scheduler = pipe.scheduler @@ -161,7 +161,7 @@ provide which module should be loaded for initialization using and UNET_OV_PATH.exists() and VAE_DECODER_OV_PATH.exists() ) - + ( scheduler, tokenizer, @@ -246,8 +246,8 @@ hidden states. import torch import openvino as ov - - + + def cleanup_torchscript_cache(): """ Helper for removing cached model representation @@ -255,8 +255,8 @@ hidden states. torch._C._jit_clear_class_registry() torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore() torch.jit._state._clear_class_state() - - + + def convert_encoder(text_encoder: torch.nn.Module, ir_path: Path): """ Convert Text Encoder mode. @@ -270,7 +270,7 @@ hidden states. input_ids = torch.ones((1, 77), dtype=torch.long) # switch model to inference mode text_encoder.eval() - + # disable gradients calculation for reducing memory consumption with torch.no_grad(): # Export model to IR format @@ -286,13 +286,13 @@ hidden states. cleanup_torchscript_cache() gc.collect() print(f"Text Encoder successfully converted to IR and saved to {ir_path}") - - + + if not TEXT_ENCODER_OV_PATH.exists(): convert_encoder(text_encoder, TEXT_ENCODER_OV_PATH) else: print(f"Text encoder will be loaded from {TEXT_ENCODER_OV_PATH}") - + del text_encoder gc.collect() @@ -360,8 +360,8 @@ Model predicts the ``sample`` state for the next step. cleanup_torchscript_cache() gc.collect() print(f"Unet successfully converted to IR and saved to {ir_path}") - - + + if not UNET_OV_PATH.exists(): convert_unet(unet, UNET_OV_PATH) else: @@ -419,18 +419,18 @@ VAE encoder, can be found in Stable Diffusion notebook. 
Returns: None """ - + class VAEDecoderWrapper(torch.nn.Module): def __init__(self, vae): super().__init__() self.vae = vae - + def forward(self, latents): return self.vae.decode(latents) - + vae_decoder = VAEDecoderWrapper(vae) latents = torch.zeros((1, 4, 64, 64)) - + vae_decoder.eval() with torch.no_grad(): ov_model = ov.convert_model(vae_decoder, example_input=latents) @@ -438,13 +438,13 @@ VAE encoder, can be found in Stable Diffusion notebook. del ov_model cleanup_torchscript_cache() print(f"VAE decoder successfully converted to IR and saved to {ir_path}") - - + + if not VAE_DECODER_OV_PATH.exists(): convert_vae_decoder(vae, VAE_DECODER_OV_PATH) else: print(f"VAE decoder will be loaded from {VAE_DECODER_OV_PATH}") - + del vae gc.collect() @@ -504,8 +504,8 @@ decoded by the decoder part of the variational auto encoder. ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.image_processor import VaeImageProcessor - - + + class OVLatentConsistencyModelPipeline(DiffusionPipeline): def __init__( self, @@ -528,7 +528,7 @@ decoded by the decoder part of the variational auto encoder. self.feature_extractor = feature_extractor self.vae_scale_factor = 2**3 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) - + def _encode_prompt( self, prompt, @@ -546,9 +546,9 @@ decoded by the decoder part of the variational auto encoder. Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. """ - + if prompt_embeds is None: - + text_inputs = self.tokenizer( prompt, padding="max_length", @@ -560,7 +560,7 @@ decoded by the decoder part of the variational auto encoder. untruncated_ids = self.tokenizer( prompt, padding="longest", return_tensors="pt" ).input_ids - + if untruncated_ids.shape[-1] >= text_input_ids.shape[ -1 ] and not torch.equal(text_input_ids, untruncated_ids): @@ -571,20 +571,20 @@ decoded by the decoder part of the variational auto encoder. "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) - + prompt_embeds = self.text_encoder(text_input_ids, share_inputs=True, share_outputs=True) prompt_embeds = torch.from_numpy(prompt_embeds[0]) - + bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view( bs_embed * num_images_per_prompt, seq_len, -1 ) - + # Don't need to get uncond prompt embedding because of LCM Guided Distillation return prompt_embeds - + def run_safety_checker(self, image, dtype): if self.safety_checker is None: has_nsfw_concept = None @@ -602,7 +602,7 @@ decoded by the decoder part of the variational auto encoder. images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept - + def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, latents=None ): @@ -617,7 +617,7 @@ decoded by the decoder part of the variational auto encoder. 
# scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents - + def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32): """ see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 @@ -630,7 +630,7 @@ decoded by the decoder part of the variational auto encoder. """ assert len(w.shape) == 1 w = w * 1000.0 - + half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) @@ -640,7 +640,7 @@ decoded by the decoder part of the variational auto encoder. emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb - + @torch.no_grad() def __call__( self, @@ -657,7 +657,7 @@ decoded by the decoder part of the variational auto encoder. return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, ): - + # 1. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 @@ -665,21 +665,21 @@ decoded by the decoder part of the variational auto encoder. batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] - + # do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG) - + # 2. Encode input prompt prompt_embeds = self._encode_prompt( prompt, num_images_per_prompt, prompt_embeds=prompt_embeds, ) - + # 3. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, original_inference_steps=lcm_origin_steps) timesteps = self.scheduler.timesteps - + # 4. Prepare latent variable num_channels_latents = 4 latents = self.prepare_latents( @@ -690,28 +690,28 @@ decoded by the decoder part of the variational auto encoder. prompt_embeds.dtype, latents, ) - + bs = batch_size * num_images_per_prompt - + # 5. Get Guidance Scale Embedding w = torch.tensor(guidance_scale).repeat(bs) w_embedding = self.get_w_embedding(w, embedding_dim=256) - + # 6. LCM MultiStep Sampling Loop: with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): - + ts = torch.full((bs,), t, dtype=torch.long) - + # model prediction (v-prediction, eps, x) model_pred = self.unet([latents, ts, prompt_embeds, w_embedding], share_inputs=True, share_outputs=True)[0] - + # compute the previous noisy sample x_t -> x_t-1 latents, denoised = self.scheduler.step( torch.from_numpy(model_pred), t, latents, return_dict=False ) progress_bar.update() - + if not output_type == "latent": image = torch.from_numpy(self.vae_decoder(denoised / 0.18215, share_inputs=True, share_outputs=True)[0]) image, has_nsfw_concept = self.run_safety_checker( @@ -720,19 +720,19 @@ decoded by the decoder part of the variational auto encoder. else: image = denoised has_nsfw_concept = None - + if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] - + image = self.image_processor.postprocess( image, output_type=output_type, do_denormalize=do_denormalize ) - + if not return_dict: return (image, has_nsfw_concept) - + return StableDiffusionPipelineOutput( images=image, nsfw_content_detected=has_nsfw_concept ) @@ -749,16 +749,16 @@ inference using OpenVINO. .. 
code:: ipython3 core = ov.Core() - + import ipywidgets as widgets - + device = widgets.Dropdown( options=core.available_devices + ["AUTO"], value="CPU", description="Device:", disabled=False, ) - + device @@ -774,9 +774,9 @@ inference using OpenVINO. text_enc = core.compile_model(TEXT_ENCODER_OV_PATH, device.value) unet_model = core.compile_model(UNET_OV_PATH, device.value) - + ov_config = {"INFERENCE_PRECISION_HINT": "f32"} if device.value != "CPU" else {} - + vae_decoder = core.compile_model(VAE_DECODER_OV_PATH, device.value, ov_config) Model tokenizer and scheduler are also important parts of the pipeline. @@ -811,7 +811,7 @@ Now, let’s see model in action prompt = "a beautiful pink unicorn, 8k" num_inference_steps = 4 torch.manual_seed(1234567) - + images = ov_pipe( prompt=prompt, num_inference_steps=num_inference_steps, @@ -878,7 +878,7 @@ improve model inference speed. description='Quantization', disabled=False, ) - + to_quantize @@ -897,12 +897,12 @@ Let’s load ``skip magic`` extension to skip quantization if import sys sys.path.append("../utils") - + int8_pipe = None - + if to_quantize.value and "GPU" in device.value: to_quantize.value = False - + %load_ext skip_kernel_extension Prepare calibration dataset @@ -918,34 +918,34 @@ model inputs for calibration we should customize ``CompiledModel``. .. code:: ipython3 %%skip not $to_quantize.value - + import datasets from tqdm.notebook import tqdm from transformers import set_seed from typing import Any, Dict, List - + set_seed(1) - + class CompiledModelDecorator(ov.CompiledModel): def __init__(self, compiled_model, prob: float, data_cache: List[Any] = None): super().__init__(compiled_model) self.data_cache = data_cache if data_cache else [] self.prob = np.clip(prob, 0, 1) - + def __call__(self, *args, **kwargs): if np.random.rand() >= self.prob: self.data_cache.append(*args) return super().__call__(*args, **kwargs) - + def collect_calibration_data(lcm_pipeline: OVLatentConsistencyModelPipeline, subset_size: int) -> List[Dict]: original_unet = lcm_pipeline.unet lcm_pipeline.unet = CompiledModelDecorator(original_unet, prob=0.3) - + dataset = datasets.load_dataset("conceptual_captions", split="train").shuffle(seed=42) lcm_pipeline.set_progress_bar_config(disable=True) safety_checker = lcm_pipeline.safety_checker lcm_pipeline.safety_checker = None - + # Run inference for data collection pbar = tqdm(total=subset_size) diff = 0 @@ -968,7 +968,7 @@ model inputs for calibration we should customize ``CompiledModel``. break pbar.update(collected_subset_size - diff) diff = collected_subset_size - + calibration_dataset = lcm_pipeline.unet.data_cache lcm_pipeline.set_progress_bar_config(disable=False) lcm_pipeline.unet = original_unet @@ -978,11 +978,11 @@ model inputs for calibration we should customize ``CompiledModel``. .. code:: ipython3 %%skip not $to_quantize.value - + import logging logging.basicConfig(level=logging.WARNING) logger = logging.getLogger(__name__) - + UNET_INT8_OV_PATH = Path("model/unet_int8.xml") if not UNET_INT8_OV_PATH.exists(): subset_size = 200 @@ -1008,10 +1008,10 @@ Create a quantized model from the pre-trained converted OpenVINO model. .. code:: ipython3 %%skip not $to_quantize.value - + import nncf from nncf.scopes import IgnoredScope - + if UNET_INT8_OV_PATH.exists(): print("Loading quantized model") quantized_unet = core.read_model(UNET_INT8_OV_PATH) @@ -1020,7 +1020,6 @@ Create a quantized model from the pre-trained converted OpenVINO model. 
quantized_unet = nncf.quantize( model=unet, subset_size=subset_size, - preset=nncf.QuantizationPreset.MIXED, calibration_dataset=nncf.Dataset(unet_calibration_data), model_type=nncf.ModelType.TRANSFORMER, advanced_parameters=nncf.AdvancedQuantizationParameters( @@ -1042,7 +1041,9 @@ Create a quantized model from the pre-trained converted OpenVINO model. +.. raw:: html +

 
 
 
@@ -1061,7 +1062,9 @@ Create a quantized model from the pre-trained converted OpenVINO model.
 
 
 
+.. raw:: html
 
+    

 
 
 
@@ -1085,7 +1088,9 @@ Create a quantized model from the pre-trained converted OpenVINO model.
 
 
 
+.. raw:: html
 
+    

 
 
 
@@ -1100,9 +1105,9 @@ Create a quantized model from the pre-trained converted OpenVINO model.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     unet_optimized = core.compile_model(UNET_INT8_OV_PATH, device.value)
-
+    
     int8_pipe = OVLatentConsistencyModelPipeline(
         tokenizer=tokenizer,
         text_encoder=text_enc,
@@ -1119,13 +1124,13 @@ data.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     from IPython.display import display
-
+    
     prompt = "a beautiful pink unicorn, 8k"
     num_inference_steps = 4
     torch.manual_seed(1234567)
-
+    
     images = int8_pipe(
         prompt=prompt,
         num_inference_steps=num_inference_steps,
@@ -1135,7 +1140,7 @@ data.
         height=512,
         width=512,
     ).images
-
+    
     display(images[0])
 
 
@@ -1164,9 +1169,9 @@ pipelines, we use median inference time on calibration subset.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     import time
-
+    
     validation_size = 10
     calibration_dataset = datasets.load_dataset("conceptual_captions", split="train")
     validation_data = []
@@ -1175,7 +1180,7 @@ pipelines, we use median inference time on calibration subset.
             break
         prompt = batch["caption"]
         validation_data.append(prompt)
-
+    
     def calculate_inference_time(pipeline, calibration_dataset):
         inference_time = []
         pipeline.set_progress_bar_config(disable=True)
@@ -1200,7 +1205,7 @@ pipelines, we use median inference time on calibration subset.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     fp_latency = calculate_inference_time(ov_pipe, validation_data)
     int8_latency = calculate_inference_time(int8_pipe, validation_data)
     print(f"Performance speed up: {fp_latency / int8_latency:.3f}")
@@ -1219,10 +1224,10 @@ Compare UNet file size
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     fp16_ir_model_size = UNET_OV_PATH.with_suffix(".bin").stat().st_size / 1024
     quantized_model_size = UNET_INT8_OV_PATH.with_suffix(".bin").stat().st_size / 1024
-
+    
     print(f"FP16 model size: {fp16_ir_model_size:.2f} KB")
     print(f"INT8 model size: {quantized_model_size:.2f} KB")
     print(f"Model compression rate: {fp16_ir_model_size / quantized_model_size:.3f}")
@@ -1245,9 +1250,9 @@ Interactive demo
     import random
     import gradio as gr
     from functools import partial
-
+    
     MAX_SEED = np.iinfo(np.int32).max
-
+    
     examples = [
         "portrait photo of a girl, photograph, highly detailed face, depth of field, moody light, golden hour,"
         "style by Dan Winters, Russell James, Steve McCurry, centered, extremely detailed, Nikon D850, award winning photography",
@@ -1255,16 +1260,16 @@ Interactive demo
         "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
         "A photo of beautiful mountain with realistic sunset and blue lake, highly detailed, masterpiece",
     ]
-
-
+    
+    
     def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
         if randomize_seed:
             seed = random.randint(0, MAX_SEED)
         return seed
-
-
+    
+    
     MAX_IMAGE_SIZE = 768
-
+    
     def generate(
         pipeline: OVLatentConsistencyModelPipeline,
         prompt: str,
@@ -1290,11 +1295,11 @@ Interactive demo
             output_type="pil",
         ).images[0]
         return result, seed
-
+    
     generate_original = partial(generate, ov_pipe)
     generate_optimized = partial(generate, int8_pipe)
     quantized_model_present = int8_pipe is not None
-
+    
     with gr.Blocks() as demo:
         with gr.Group():
             with gr.Row():
@@ -1312,7 +1317,7 @@ Interactive demo
                 with gr.Column(visible=quantized_model_present):
                     result_optimized = gr.Image(label="Result (Optimized)", type="pil", visible=quantized_model_present)
                     run_quantized_button = gr.Button(value="Run quantized", visible=quantized_model_present)
-
+    
         with gr.Accordion("Advanced options", open=False):
             seed = gr.Slider(
                 label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True
@@ -1348,14 +1353,14 @@ Interactive demo
                     step=1,
                     value=4,
                 )
-
+    
         gr.Examples(
             examples=examples,
             inputs=prompt,
             outputs=result,
             cache_examples=False,
         )
-
+    
         gr.on(
             triggers=[
                 prompt.submit,
@@ -1373,7 +1378,7 @@ Interactive demo
             ],
             outputs=[result, seed],
         )
-
+    
         if quantized_model_present:
             gr.on(
                 triggers=[
diff --git a/docs/notebooks/263-latent-consistency-models-optimum-demo-with-output.rst b/docs/notebooks/263-latent-consistency-models-optimum-demo-with-output.rst
new file mode 100644
index 00000000000000..c6acc725570f76
--- /dev/null
+++ b/docs/notebooks/263-latent-consistency-models-optimum-demo-with-output.rst
@@ -0,0 +1,349 @@
+Latent Consistency Model using Optimum-Intel OpenVINO
+=====================================================
+
+This notebook provides instructions on how to run the Latent Consistency
+Model (LCM). It allows you to set up the standard Hugging Face diffusers
+pipeline and the Optimum Intel pipeline optimized for Intel hardware,
+including CPU and GPU. Running inference on both CPU and GPU makes it
+easy to compare the performance and the time required to generate an
+image for a given prompt. The notebook can also be used on other Intel
+hardware with minimal or no modifications.
+
+|image0|
+
+Optimum Intel is an interface from Hugging Face between the diffusers
+and transformers libraries and various tools provided by Intel to
+accelerate pipelines on Intel hardware. It allows you to perform
+quantization of the models hosted on Hugging Face. In this notebook,
+OpenVINO is used as the Optimum Intel backend for AI-inference
+acceleration.
+
+For more details, please refer to the Optimum Intel repository:
+https://github.com/huggingface/optimum-intel
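+
+As a minimal, illustrative sketch of the Optimum Intel usage pattern shown
+later in this notebook (not the exact cells below; the import path and call
+signature follow the usual Optimum and diffusers conventions and may differ
+slightly between versions):
+
+.. code:: ipython3
+
+    from optimum.intel import OVLatentConsistencyModelPipeline
+    
+    # Export the Hugging Face model to OpenVINO IR on the fly and run it on CPU
+    ov_pipe = OVLatentConsistencyModelPipeline.from_pretrained(
+        "SimianLuo/LCM_Dreamshaper_v7", export=True
+    )
+    ov_pipe.to("CPU")
+    image = ov_pipe("a beautiful pink unicorn, 8k", num_inference_steps=4).images[0]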
+
+LCMs are the next generation of generative models after Latent Diffusion
+Models (LDMs). They are designed to overcome the slow iterative sampling
+process of LDMs, enabling fast inference in a minimal number of steps
+(from 2 to 4) on any pre-trained LDM (e.g. Stable Diffusion). To read
+more about LCMs, please refer to
+https://latent-consistency-models.github.io/
+
+Table of contents:
+^^^^^^^^^^^^^^^^^^
+
+-  `Prerequisites <#prerequisites>`__
+-  `Full precision model on the
+   CPU <#using-full-precision-model-in-cpu-with-latentconsistencymodelpipeline>`__
+-  `Running inference using Optimum Intel
+   OVLatentConsistencyModelPipeline <#running-inference-using-optimum-intel-ovlatentconsistencymodelpipeline>`__
+
+.. |image0| image:: https://github.com/openvinotoolkit/openvino_notebooks/assets/10940214/1858dae4-72fd-401e-b055-66d503d82446
+
+Prerequisites
+~~~~~~~~~~~~~
+
+
+
+Install required packages
+
+.. code:: ipython3
+
+    %pip install -q "openvino>=2023.3.0"
+    %pip install -q "onnx>=1.11.0"
+    %pip install -q "optimum-intel[diffusers]@git+https://github.com/huggingface/optimum-intel.git" "ipywidgets" "transformers>=4.33.0" --extra-index-url https://download.pytorch.org/whl/cpu
+
+
+.. parsed-literal::
+
+    DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063
+    
+
+.. parsed-literal::
+
+    Note: you may need to restart the kernel to use updated packages.
+
+
+.. parsed-literal::
+
+    DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063
+    
+
+.. parsed-literal::
+
+    Note: you may need to restart the kernel to use updated packages.
+
+
+.. parsed-literal::
+
+    DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063
+    
+
+.. parsed-literal::
+
+    Note: you may need to restart the kernel to use updated packages.
+
+
+.. code:: ipython3
+
+    import warnings
+    warnings.filterwarnings('ignore')
+
+Showing Info Available Devices
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+The ``available_devices`` property shows the available devices in your
+system. The “FULL_DEVICE_NAME” option of ``core.get_property()`` shows the
+name of a device. Check the ID name of your discrete GPU: if you have both
+an integrated GPU (iGPU) and a discrete GPU (dGPU), the iGPU is shown as
+``device_name="GPU.0"`` and the dGPU as ``device_name="GPU.1"``. If you
+have only an iGPU or only a dGPU, it is assigned to ``"GPU"``.
+
+Note: For more details about GPU with OpenVINO, visit this
+`link `__.
+If you have been facing any issues on Ubuntu 20.04 or Windows 11, read
+this
+`blog `__.
+
+.. code:: ipython3
+
+    import openvino as ov
+    core = ov.Core()
+    devices = core.available_devices
+    
+    for device in devices:
+        device_name = core.get_property(device, "FULL_DEVICE_NAME")
+        print(f"{device}: {device_name}")
+
+
+
+.. parsed-literal::
+
+    CPU: Intel(R) Core(TM) i9-10920X CPU @ 3.50GHz
+
+
+Using full precision model in CPU with ``LatentConsistencyModelPipeline``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+The standard pipeline for the Latent Consistency Model (LCM) from the
+Diffusers library is used here. For more information, please refer to
+https://huggingface.co/docs/diffusers/en/api/pipelines/latent_consistency_models
+
+.. code:: ipython3
+
+    from diffusers import LatentConsistencyModelPipeline
+    import gc
+    
+    pipeline = LatentConsistencyModelPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7")
+
+
+
+.. parsed-literal::
+
+    2024-03-13 00:06:35.851024: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
+    2024-03-13 00:06:35.885328: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
+    To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
+
+
+.. parsed-literal::
+
+    2024-03-13 00:06:36.470803: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
+
+
+
+.. parsed-literal::
+
+    Loading pipeline components...:   0%|          | 0/7 [00:00<?, ?it/s]

-    %pip install -q "torch" transformers "diffusers>=0.22.0" "controlnet-aux>=0.0.6" accelerate --extra-index-url https://download.pytorch.org/whl/cpu
+    %pip install -q "torch" transformers "diffusers>=0.22.0" "controlnet-aux>=0.0.6" "peft==0.6.2" accelerate --extra-index-url https://download.pytorch.org/whl/cpu
     %pip install -q "openvino>=2023.2.0" pillow gradio datasets "nncf>=2.7.0"
 
 Prepare PyTorch models
@@ -232,18 +232,18 @@ Prepare PyTorch models
 .. code:: ipython3
 
     from pathlib import Path
-
+    
     controlnet_id = "lllyasviel/control_v11p_sd15_normalbae"
     adapter_id = "latent-consistency/lcm-lora-sdv1-5"
     stable_diffusion_id = "runwayml/stable-diffusion-v1-5"
-
+    
     TEXT_ENCODER_OV_PATH = Path('model/text_encoder.xml')
     UNET_OV_PATH = Path('model/unet_controlnet.xml')
     CONTROLNET_OV_PATH = Path('model/controlnet-normalbae.xml')
     VAE_DECODER_OV_PATH = Path('model/vae_decoder.xml')
     TOKENIZER_PATH = Path('model/tokenizer')
     SCHEDULER_PATH = Path('model/scheduler')
-
+    
     skip_models = TEXT_ENCODER_OV_PATH.exists() and UNET_OV_PATH.exists() and CONTROLNET_OV_PATH.exists() and VAE_DECODER_OV_PATH.exists()
 
 Load Original Diffusers pipeline and prepare models for conversion
@@ -274,12 +274,12 @@ ControlNet model 3. Load LoRA weights to the pipeline using
 
     from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
     import gc
-
-
+    
+    
     def load_original_pytorch_pipeline_components(controlnet_id:str, stable_diffusion_id:str, adapter_id:str):
         """
         Helper function for loading Stable Diffusion ControlNet pipeline and applying LCM LoRA
-
+    
         Parameters:
           controlnet_id: model id from HuggingFace hub or local path for loading ControlNet model
           stable_diffusion_id: model id from HuggingFace hub or local path for loading Stable Diffusion model
@@ -290,7 +290,7 @@ ControlNet model 3. Load LoRA weights to the pipeline using
           unet: Stable Diffusion U-Net
           vae: Stable Diffusion Variational Autoencoder (VAE)
         """
-
+        
         # load controlnet model
         controlnet = ControlNetModel.from_pretrained(controlnet_id)
         # load stable diffusion pipeline
@@ -348,20 +348,20 @@ color-coded image.
     import matplotlib.pyplot as plt
     from PIL import Image
     import numpy as np
-
+    
     example_image_url = "https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/input.png"
     urlretrieve(example_image_url, "example.png")
-
+    
     processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
-
+    
     image = load_image("example.png")
     control_image = processor(image)
-
-
+    
+    
     def visualize_results(orig_img:Image.Image, normal_img:Image.Image, result_img:Image.Image = None, save_fig:bool = False):
         """
         Helper function for results visualization
-
+        
         Parameters:
            orig_img (Image.Image): original image
           normal_img (Image.Image): image with surface normal information
@@ -389,18 +389,18 @@ color-coded image.
         list_axes[0].imshow(np.array(orig_img))
         list_axes[1].imshow(np.array(normal_img))
         list_axes[0].set_title(orig_title, fontsize=15)
-        list_axes[1].set_title(control_title, fontsize=15)
+        list_axes[1].set_title(control_title, fontsize=15) 
         if result_img is not None:
             list_axes[2].imshow(np.array(result_img))
             list_axes[2].set_title("Result", fontsize=15)
-
+        
         fig.subplots_adjust(wspace=0.01 if is_horizontal else 0.00 , hspace=0.01 if is_horizontal else 0.1)
         fig.tight_layout()
         if save_fig:
             fig.savefig("result.png", bbox_inches='tight')
         return fig
-
-
+    
+    
     fig = visualize_results(image, control_image)
 
 
@@ -458,7 +458,7 @@ blocks, which serves additional context for the UNet model.
     import torch
     import openvino as ov
     from functools import partial
-
+    
     def cleanup_torchscript_cache():
         """
         Helper for removing cached model representation
@@ -466,8 +466,8 @@ blocks, which serves additional context for the UNet model.
         torch._C._jit_clear_class_registry()
         torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore()
         torch.jit._state._clear_class_state()
-
-
+    
+    
     def flattenize_inputs(inputs):
         """
         Helper function for resolve nested input structure (e.g. lists or tuples of tensors)
@@ -481,16 +481,16 @@ blocks, which serves additional context for the UNet model.
             else:
                 flatten_inputs.append(input_data)
         return flatten_inputs
-
-
+    
+    
     dtype_mapping = {
         torch.float32: ov.Type.f32,
         torch.float64: ov.Type.f64,
         torch.int32: ov.Type.i32,
         torch.int64: ov.Type.i64
     }
-
-
+    
+    
     def prepare_input_info(input_dict):
         """
         Helper function for preparing input info (shapes and data types) for conversion based on example inputs
@@ -504,25 +504,25 @@ blocks, which serves additional context for the UNet model.
             if input_data.ndim == 4:
                 updated_shape[2] = -1
                 updated_shape[3] = -1
-
+    
             input_info.append((dtype_mapping[input_data.dtype], updated_shape))
         return input_info
-
-
+    
+    
     inputs = {
         "sample": torch.randn((1, 4, 64, 64)),
         "timestep": torch.tensor(1, dtype=torch.float32),
         "encoder_hidden_states": torch.randn((1,77,768)),
         "controlnet_cond": torch.randn((1,3,512,512))
     }
-
-
+    
+    
     # Prepare conditional inputs for U-Net
-    if not UNET_OV_PATH.exists():
+    if not UNET_OV_PATH.exists():    
         controlnet.eval()
         with torch.no_grad():
             down_block_res_samples, mid_block_res_sample = controlnet(**inputs, return_dict=False)
-
+        
     if not CONTROLNET_OV_PATH.exists():
         input_info = prepare_input_info(inputs)
         with torch.no_grad():
@@ -534,7 +534,7 @@ blocks, which serves additional context for the UNet model.
         print('ControlNet successfully converted to IR')
     else:
         print(f"ControlNet will be loaded from {CONTROLNET_OV_PATH}")
-
+    
     del controlnet
     gc.collect()
 
@@ -564,15 +564,15 @@ generated by ControlNet.
 .. code:: ipython3
 
     from typing import Tuple
-
+    
     class UnetWrapper(torch.nn.Module):
         def __init__(
-            self,
-            unet,
-            sample_dtype=torch.float32,
-            timestep_dtype=torch.int64,
-            encoder_hidden_states=torch.float32,
-            down_block_additional_residuals=torch.float32,
+            self, 
+            unet, 
+            sample_dtype=torch.float32, 
+            timestep_dtype=torch.int64, 
+            encoder_hidden_states=torch.float32, 
+            down_block_additional_residuals=torch.float32, 
             mid_block_additional_residual=torch.float32
         ):
             super().__init__()
@@ -582,13 +582,13 @@ generated by ControlNet.
             self.encoder_hidden_states_dtype = encoder_hidden_states
             self.down_block_additional_residuals_dtype = down_block_additional_residuals
             self.mid_block_additional_residual_dtype = mid_block_additional_residual
-
+    
         def forward(
-            self,
-            sample:torch.Tensor,
-            timestep:torch.Tensor,
-            encoder_hidden_states:torch.Tensor,
-            down_block_additional_residuals:Tuple[torch.Tensor],
+            self, 
+            sample:torch.Tensor, 
+            timestep:torch.Tensor, 
+            encoder_hidden_states:torch.Tensor, 
+            down_block_additional_residuals:Tuple[torch.Tensor],  
             mid_block_additional_residual:torch.Tensor
         ):
             sample.to(self.sample_dtype)
@@ -597,27 +597,27 @@ generated by ControlNet.
             down_block_additional_residuals = [res.to(self.down_block_additional_residuals_dtype) for res in down_block_additional_residuals]
             mid_block_additional_residual.to(self.mid_block_additional_residual_dtype)
             return self.unet(
-                sample,
-                timestep,
-                encoder_hidden_states,
-                down_block_additional_residuals=down_block_additional_residuals,
+                sample, 
+                timestep, 
+                encoder_hidden_states, 
+                down_block_additional_residuals=down_block_additional_residuals, 
                 mid_block_additional_residual=mid_block_additional_residual
             )
-
-
-
+    
+    
+    
     if not UNET_OV_PATH.exists():
         inputs.pop("controlnet_cond", None)
         inputs["down_block_additional_residuals"] = down_block_res_samples
         inputs["mid_block_additional_residual"] = mid_block_res_sample
         input_info = prepare_input_info(inputs)
-
+    
         wrapped_unet = UnetWrapper(unet)
         wrapped_unet.eval()
-
+    
         with torch.no_grad():
             ov_model = ov.convert_model(wrapped_unet, example_input=inputs)
-
+            
         for (input_dtype, input_shape), input_tensor in zip(input_info, ov_model.inputs):
             input_tensor.get_node().set_partial_shape(ov.PartialShape(input_shape))
             input_tensor.get_node().set_element_type(input_dtype)
@@ -670,9 +670,9 @@ hidden states.
 
     def convert_encoder(text_encoder:torch.nn.Module, ir_path:Path):
         """
-        Convert Text Encoder model to OpenVINO IR.
+        Convert Text Encoder model to OpenVINO IR. 
         Function accepts the text encoder model, prepares example inputs for conversion, and converts it to an OpenVINO Model
-        Parameters:
+        Parameters: 
             text_encoder (torch.nn.Module): text_encoder model
             ir_path (Path): File for storing model
         Returns:
@@ -682,7 +682,7 @@ hidden states.
             input_ids = torch.ones((1, 77), dtype=torch.long)
             # switch model to inference mode
             text_encoder.eval()
-
+    
             # disable gradients calculation for reducing memory consumption
             with torch.no_grad():
                 ov_model = ov.convert_model(
@@ -694,8 +694,8 @@ hidden states.
                 del ov_model
             cleanup_torchscript_cache()
             print('Text Encoder successfully converted to IR')
-
-
+        
+    
     if not TEXT_ENCODER_OV_PATH.exists():
         convert_encoder(text_encoder, TEXT_ENCODER_OV_PATH)
     else:
@@ -741,10 +741,10 @@ diffusion
 
     def convert_vae_decoder(vae: torch.nn.Module, ir_path: Path):
         """
-        Convert VAE model to IR format.
-        Function accepts pipeline, creates wrapper class for export only necessary for inference part,
-        prepares example inputs for convert,
-        Parameters:
+        Convert VAE model to IR format. 
+        Function accepts the pipeline, creates a wrapper class to export only the part necessary for inference,
+        and prepares example inputs for conversion.
+        Parameters: 
             vae (torch.nn.Module): VAE model
             ir_path (Path): File for storing model
         Returns:
@@ -754,14 +754,14 @@ diffusion
             def __init__(self, vae):
                 super().__init__()
                 self.vae = vae
-
+    
             def forward(self, latents):
                 return self.vae.decode(latents)
-
+    
         if not ir_path.exists():
             vae_decoder = VAEDecoderWrapper(vae)
             latents = torch.zeros((1, 4, 64, 64))
-
+    
             vae_decoder.eval()
             with torch.no_grad():
                 ov_model = ov.convert_model(vae_decoder, example_input=latents, input=[-1, 4, -1, -1])
@@ -769,13 +769,13 @@ diffusion
             del ov_model
             cleanup_torchscript_cache()
             print('VAE decoder successfully converted to IR')
-
-
+    
+    
     if not VAE_DECODER_OV_PATH.exists():
         convert_vae_decoder(vae, VAE_DECODER_OV_PATH)
     else:
         print(f"VAE decoder will be loaded from {VAE_DECODER_OV_PATH}")
-
+    
     del vae
 
 
@@ -803,13 +803,13 @@ OpenVINO.
     from transformers import CLIPTokenizer
     from typing import Union, List, Optional, Tuple
     import cv2
-
-
+    
+    
     def scale_fit_to_window(dst_width:int, dst_height:int, image_width:int, image_height:int):
         """
-        Preprocessing helper function for calculating image size for resize with peserving original aspect ratio
+        Preprocessing helper function for calculating image size for resizing while preserving the original aspect ratio
         and fitting image to specific window size
-
+        
         Parameters:
           dst_width (int): destination window width
           dst_height (int): destination window height
@@ -821,15 +821,15 @@ OpenVINO.
         """
         im_scale = min(dst_height / image_height, dst_width / image_width)
         return int(im_scale * image_width), int(im_scale * image_height)
-
-
+    
+    
     def preprocess(image: Image.Image, dst_height:int = 512, dst_width:int = 512):
         """
         Image preprocessing function. Takes an image in PIL.Image format, resizes it to keep the aspect ratio and fit the 512x512 model input window,
         then converts it to np.ndarray and adds zero padding on the right or bottom side of the image (depending on the aspect ratio), after that
         converts the data to float32 and changes the range of values from [0, 255] to [-1, 1], and finally converts the data layout from NHWC to planar NCHW.
         The function returns preprocessed input tensor and padding size, which can be used in postprocessing.
-
+        
         Parameters:
           image (Image.Image): input image
           dst_width: destination image width
@@ -848,15 +848,15 @@ OpenVINO.
         image = image.astype(np.float32) / 255.0
         image = image.transpose(0, 3, 1, 2)
         return image, pad
-
-
+    
+    
     def randn_tensor(
         shape: Union[Tuple, List],
         dtype: Optional[torch.dtype] = torch.float32,
     ):
         """
         Helper function for generating a tensor of random values with the given shape and data type
-
+        
         Parameters:
           shape (Union[Tuple, List]): shape for filling random values
           dtype (torch.dtype, *optional*, torch.float32): data type for result
@@ -865,8 +865,8 @@ OpenVINO.
         """
         latents = torch.randn(shape, dtype=dtype)
         return latents.numpy()
-
-
+    
+    
     class OVControlNetStableDiffusionPipeline(DiffusionPipeline):
         """
         OpenVINO inference pipeline for Stable Diffusion with ControlNet guidance
@@ -887,11 +887,11 @@ OpenVINO.
             self.vae_scale_factor = 8
             self.scheduler = scheduler
             self.load_models(core, device, controlnet, text_encoder, unet, vae_decoder)
-
+    
         def load_models(self, core: ov.Core, device: str, controlnet:ov.Model, text_encoder: ov.Model, unet: ov.Model, vae_decoder: ov.Model):
             """
             Function for loading models on device using OpenVINO
-
+            
             Parameters:
               core (Core): OpenVINO runtime Core class instance
               device (str): inference device
@@ -907,7 +907,7 @@ OpenVINO.
             self.register_to_config(unet=core.compile_model(unet, device))
             ov_config = {"INFERENCE_PRECISION_HINT": "f32"} if device != "CPU" else {}
             self.vae_decoder = core.compile_model(vae_decoder, device, ov_config)
-
+    
         def __call__(
             self,
             prompt: Union[str, List[str]],
@@ -923,7 +923,7 @@ OpenVINO.
         ):
             """
             Function invoked when calling the pipeline for generation.
-
+    
             Parameters:
                 prompt (`str` or `List[str]`):
                     The prompt or prompts to guide the image generation.
@@ -951,9 +951,9 @@ OpenVINO.
                     [PIL](https://pillow.readthedocs.io/en/stable/): `Image.Image` or `np.array`.
             Returns:
                 image (List[Union[np.ndarray, Image.Image]]): generated images
-
+                
             """
-
+    
             # 1. Define call parameters
             batch_size = 1 if isinstance(prompt, str) else len(prompt)
             if guidance_scale < 1 and negative_prompt:
@@ -964,17 +964,17 @@ OpenVINO.
             do_classifier_free_guidance = guidance_scale > 1.0
             # 2. Encode input prompt
             text_embeddings = self._encode_prompt(prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt)
-
+    
             # 3. Preprocess image
             orig_width, orig_height = image.size
             image, pad = preprocess(image, height, width)
             if do_classifier_free_guidance:
                 image = np.concatenate(([image] * 2))
-
+    
             # 4. set timesteps
             self.scheduler.set_timesteps(num_inference_steps)
             timesteps = self.scheduler.timesteps
-
+    
             # 5. Prepare latent variables
             num_channels_latents = 4
             latents = self.prepare_latents(
@@ -984,7 +984,7 @@ OpenVINO.
                 width,
                 latents=latents,
             )
-
+    
             # 6. Denoising loop
             with self.progress_bar(total=num_inference_steps) as progress_bar:
                 for i, t in enumerate(timesteps):
@@ -994,25 +994,25 @@ OpenVINO.
                     latent_model_input = np.concatenate(
                         [latents] * 2) if do_classifier_free_guidance else latents
                     latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
+    
                     result = self.controlnet([latent_model_input, t, text_embeddings, image], share_inputs=True, share_outputs=True)
                     down_and_mid_blok_samples = [sample * controlnet_conditioning_scale for _, sample in result.items()]
-
+    
                     # predict the noise residual
                     noise_pred = self.unet([latent_model_input, t, text_embeddings, *down_and_mid_blok_samples], share_inputs=True, share_outputs=True)[0]
-
+    
                     # perform guidance
                     if do_classifier_free_guidance:
                         noise_pred_uncond, noise_pred_text = noise_pred[0], noise_pred[1]
                         noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
+    
                     # compute the previous noisy sample x_t -> x_t-1
                     latents = self.scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents)).prev_sample.numpy()
                     progress_bar.update()
-
+    
             # 7. Post-processing
             image = self.decode_latents(latents, pad)
-
+    
             # 8. Convert to PIL
             if output_type == "pil":
                 image = self.numpy_to_pil(image)
@@ -1020,13 +1020,13 @@ OpenVINO.
             else:
                 image = [cv2.resize(img, (orig_width, orig_height))
                          for img in image]
-
+    
             return image
-
+    
         def _encode_prompt(self, prompt:Union[str, List[str]], num_images_per_prompt:int = 1, do_classifier_free_guidance:bool = True, negative_prompt:Union[str, List[str]] = None):
             """
             Encodes the prompt into text encoder hidden states.
-
+    
             Parameters:
                 prompt (str or list(str)): prompt to be encoded
                 num_images_per_prompt (int): number of images that should be generated per prompt
@@ -1036,7 +1036,7 @@ OpenVINO.
                 text_embeddings (np.ndarray): text encoder hidden states
             """
             batch_size = len(prompt) if isinstance(prompt, list) else 1
-
+    
             # tokenize input prompts
             text_inputs = self.tokenizer(
                 prompt,
@@ -1046,9 +1046,9 @@ OpenVINO.
                 return_tensors="np",
             )
             text_input_ids = text_inputs.input_ids
-
+    
             text_embeddings = self.text_encoder(text_input_ids, share_inputs=True, share_outputs=True)[0]
-
+    
             # duplicate text embeddings for each generation per prompt
             if num_images_per_prompt != 1:
                 bs_embed, seq_len, _ = text_embeddings.shape
@@ -1056,7 +1056,7 @@ OpenVINO.
                     text_embeddings, (1, num_images_per_prompt, 1))
                 text_embeddings = np.reshape(
                     text_embeddings, (bs_embed * num_images_per_prompt, seq_len, -1))
-
+    
             # get unconditional embeddings for classifier free guidance
             if do_classifier_free_guidance:
                 uncond_tokens: List[str]
@@ -1074,26 +1074,26 @@ OpenVINO.
                     truncation=True,
                     return_tensors="np",
                 )
-
+    
                 uncond_embeddings = self.text_encoder(uncond_input.input_ids, share_inputs=True, share_outputs=True)[0]
-
+    
                 # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                 seq_len = uncond_embeddings.shape[1]
                 uncond_embeddings = np.tile(uncond_embeddings, (1, num_images_per_prompt, 1))
                 uncond_embeddings = np.reshape(uncond_embeddings, (batch_size * num_images_per_prompt, seq_len, -1))
-
+    
                 # For classifier free guidance, we need to do two forward passes.
                 # Here we concatenate the unconditional and text embeddings into a single batch
                 # to avoid doing two forward passes
                 text_embeddings = np.concatenate([uncond_embeddings, text_embeddings])
-
+    
             return text_embeddings
-
+    
         def prepare_latents(self, batch_size:int, num_channels_latents:int, height:int, width:int, dtype:np.dtype = torch.float32, latents:np.ndarray = None):
             """
-            Preparing noise to image generation. If initial latents are not provided, they will be generated randomly,
+            Prepares noise for image generation. If initial latents are not provided, they will be generated randomly,
             then the prepared latents are scaled by the standard deviation required by the scheduler
-
+            
             Parameters:
                batch_size (int): input batch size
                num_channels_latents (int): number of channels for noise generation
@@ -1109,15 +1109,15 @@ OpenVINO.
                 latents = randn_tensor(shape, dtype=dtype)
             else:
                 latents = latents
-
+    
             # scale the initial noise by the standard deviation required by the scheduler
             latents = latents * self.scheduler.init_noise_sigma
             return latents
-
+    
         def decode_latents(self, latents:np.array, pad:Tuple[int]):
             """
             Decodes the predicted image from latent space using the VAE Decoder and unpads the result
-
+            
             Parameters:
                latents (np.ndarray): image encoded in diffusion latent space
                pad (Tuple[int]): each side padding sizes obtained on preprocessing step
@@ -1159,7 +1159,7 @@ the original pipeline scheduler with
 
     from diffusers import LCMScheduler
     from transformers import AutoTokenizer
-
+    
     if not TOKENIZER_PATH.exists():
         tokenizer = AutoTokenizer.from_pretrained(stable_diffusion_id, subfolder="tokenizer")
         tokenizer.save_pretrained(TOKENIZER_PATH)
@@ -1181,16 +1181,16 @@ select device from dropdown list for running inference using OpenVINO
 .. code:: ipython3
 
     import ipywidgets as widgets
-
+    
     core = ov.Core()
-
+    
     device = widgets.Dropdown(
         options=core.available_devices + ["AUTO"],
         value='CPU',
         description='Device:',
         disabled=False,
     )
-
+    
     device
 
 
@@ -1236,7 +1236,7 @@ Let’s see model in action
 
     prompt = "A head full of roses"
     torch.manual_seed(4257)
-
+    
     result = ov_pipe(prompt, control_image, 4)
     result[0]
 
@@ -1307,7 +1307,7 @@ improve model inference speed.
         description='Quantization',
         disabled=is_gpu_device,
     )
-
+    
     to_quantize
 
 Let’s load ``skip magic`` extension to skip quantization if
@@ -1317,9 +1317,9 @@ Let’s load ``skip magic`` extension to skip quantization if
 
     import sys
     sys.path.append("../utils")
-
+    
     int8_pipe = None
-
+    
     %load_ext skip_kernel_extension
 
 Prepare calibration datasets
@@ -1337,32 +1337,32 @@ To collect intermediate model inputs for calibration we should customize
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     import datasets
     from tqdm.notebook import tqdm
     from transformers import set_seed
     from typing import Any, Dict, List
-
+    
     set_seed(1)
-
+    
     class CompiledModelDecorator(ov.CompiledModel):
         def __init__(self, compiled_model, prob: float):
             super().__init__(compiled_model)
             self.data_cache = []
             self.prob = np.clip(prob, 0, 1)
-
+    
         def __call__(self, *args, **kwargs):
             if np.random.rand() >= self.prob:
                 self.data_cache.append(*args)
             return super().__call__(*args, **kwargs)
-
+    
     def collect_calibration_data(pipeline: OVControlNetStableDiffusionPipeline, subset_size: int) -> List[Dict]:
         original_unet = pipeline.unet
         pipeline.unet = CompiledModelDecorator(original_unet, prob=0.3)
-
+    
         dataset = datasets.load_dataset("fusing/instructpix2pix-1000-samples", split="train", streaming=True).shuffle(seed=42)
         pipeline.set_progress_bar_config(disable=True)
-
+    
         # Run inference for data collection
         pbar = tqdm(total=subset_size)
         diff = 0
@@ -1373,7 +1373,7 @@ To collect intermediate model inputs for calibration we should customize
                 continue
             image = batch["input_image"]
             control_image = processor(image)
-
+    
             _ = pipeline(prompt, image=control_image, num_inference_steps=4)
             collected_subset_size = len(pipeline.unet.data_cache)
             control_images.append((min(collected_subset_size, subset_size), control_image))
@@ -1382,7 +1382,7 @@ To collect intermediate model inputs for calibration we should customize
                 break
             pbar.update(collected_subset_size - diff)
             diff = collected_subset_size
-
+    
         control_calibration_dataset = pipeline.unet.data_cache
         pipeline.set_progress_bar_config(disable=False)
         pipeline.unet = original_unet
@@ -1391,7 +1391,7 @@ To collect intermediate model inputs for calibration we should customize
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     CONTROLNET_INT8_OV_PATH = Path("model/controlnet-normalbae_int8.xml")
     UNET_INT8_OV_PATH = Path("model/unet_controlnet_int8.xml")
     if not (CONTROLNET_INT8_OV_PATH.exists() and UNET_INT8_OV_PATH.exists()):
@@ -1411,7 +1411,7 @@ the last ControlNet input is a preprocessed ``control_image``.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     if not CONTROLNET_INT8_OV_PATH.exists():
         control_calibration_data = []
         prev_idx = 0
@@ -1436,9 +1436,9 @@ improvement in SD models and increased quantization time.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     import nncf
-
+    
     if not UNET_INT8_OV_PATH.exists():
         unet = core.read_model(UNET_OV_PATH)
         quantized_unet = nncf.quantize(
@@ -1454,7 +1454,7 @@ improvement in SD models and increased quantization time.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     if not CONTROLNET_INT8_OV_PATH.exists():
         controlnet = core.read_model(CONTROLNET_OV_PATH)
         quantized_controlnet = nncf.quantize(
@@ -1473,9 +1473,9 @@ the same input data.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     from IPython.display import display
-
+    
     int8_pipe = OVControlNetStableDiffusionPipeline(
         tokenizer,
         scheduler,
@@ -1486,12 +1486,12 @@ the same input data.
         VAE_DECODER_OV_PATH,
         device=device.value
     )
-
+    
     prompt = "A head full of roses"
     torch.manual_seed(4257)
-
+    
     int8_result = int8_pipe(prompt, control_image, 4)
-
+    
     fig = visualize_results(result[0], int8_result[0])
     fig.axes[0].set_title('FP16 result', fontsize=15)
     fig.axes[1].set_title('INT8 result', fontsize=15)
@@ -1523,9 +1523,9 @@ pipelines, we use median inference time on calibration subset.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     import time
-
+    
     validation_size = 10
     calibration_dataset = datasets.load_dataset("fusing/instructpix2pix-1000-samples", split="train", streaming=True).take(validation_size)
     validation_data = []
@@ -1534,7 +1534,7 @@ pipelines, we use median inference time on calibration subset.
         image = batch["input_image"]
         control_image = processor(image)
         validation_data.append((prompt, control_image))
-
+    
     def calculate_inference_time(pipeline, calibration_dataset):
         inference_time = []
         pipeline.set_progress_bar_config(disable=True)
@@ -1549,7 +1549,7 @@ pipelines, we use median inference time on calibration subset.
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     fp_latency = calculate_inference_time(ov_pipe, validation_data)
     int8_latency = calculate_inference_time(int8_pipe, validation_data)
     print(f"Performance speed up: {fp_latency / int8_latency:.3f}")
@@ -1568,10 +1568,10 @@ Compare model file sizes
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     fp16_ir_model_size = UNET_OV_PATH.with_suffix(".bin").stat().st_size / 2**20
     quantized_model_size = UNET_INT8_OV_PATH.with_suffix(".bin").stat().st_size / 2**20
-
+    
     print(f"FP16 UNet size: {fp16_ir_model_size:.2f} MB")
     print(f"INT8 UNet size: {quantized_model_size:.2f} MB")
     print(f"UNet compression rate: {fp16_ir_model_size / quantized_model_size:.3f}")
@@ -1587,10 +1587,10 @@ Compare model file sizes
 .. code:: ipython3
 
     %%skip not $to_quantize.value
-
+    
     fp16_ir_model_size = CONTROLNET_OV_PATH.with_suffix(".bin").stat().st_size / 2**20
     quantized_model_size = CONTROLNET_INT8_OV_PATH.with_suffix(".bin").stat().st_size / 2**20
-
+    
     print(f"FP16 ControlNet size: {fp16_ir_model_size:.2f} MB")
     print(f"INT8 ControlNet size: {quantized_model_size:.2f} MB")
     print(f"ControlNet compression rate: {fp16_ir_model_size / quantized_model_size:.3f}")
@@ -1617,9 +1617,9 @@ options for generation: ``Guidance scale``, ``Seed`` and ``Steps``.
 
     import gradio as gr
     MAX_SEED = np.iinfo(np.int32).max
-
+    
     quantized_model_present = int8_pipe is not None
-
+    
     gr.close_all()
     with gr.Blocks() as demo:
         with gr.Row():
@@ -1648,29 +1648,29 @@ options for generation: ``Guidance scale``, ``Seed`` and ``Steps``.
             with gr.Column(visible=quantized_model_present) as quantization_step:
                 int_result = gr.Image(label="Result (Quantized)")
         examples = gr.Examples([["example.png", "a head full of roses"]], [inp_img, inp_prompt])
-
+    
         def extract_normal_map(img):
             if img is None:
                 raise gr.Error("Please upload the image or use one from the examples list")
             return processor(img)
-
+    
         def generate(img, prompt, negative_prompt, seed, num_steps, guidance_scale):
             torch.manual_seed(seed)
             control_img = extract_normal_map(img)
-
+            
             result = ov_pipe(prompt, control_img, num_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt)[0]
             if int8_pipe is not None:
                 torch.manual_seed(seed)
                 int8_result = int8_pipe(prompt, control_img, num_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt)[0]
                 return control_img, result, int8_result
             return control_img, result
-
+    
         output_images = [out_normal, out_result]
         if quantized_model_present:
             output_images.append(int_result)
         btn.click(generate, [inp_img, inp_prompt, inp_neg_prompt, inp_seed, inp_steps, guidance_scale], output_images)
-
-
+    
+    
     try:
         demo.queue().launch(debug=False)
     except Exception:
diff --git a/docs/notebooks/264-qrcode-monster-with-output.rst b/docs/notebooks/264-qrcode-monster-with-output.rst
index 8c806dc5bcbd6d..bc124dc0ff6b14 100644
--- a/docs/notebooks/264-qrcode-monster-with-output.rst
+++ b/docs/notebooks/264-qrcode-monster-with-output.rst
@@ -70,7 +70,7 @@ Prerequisites
 
 .. code:: ipython3
 
-    %pip install -q accelerate diffusers transformers torch gradio qrcode opencv-python --extra-index-url https://download.pytorch.org/whl/cpu
+    %pip install -q accelerate diffusers transformers torch gradio qrcode opencv-python "peft==0.6.2" --extra-index-url https://download.pytorch.org/whl/cpu
     %pip install -q "openvino>=2023.1.0" "nncf>=2.7.0"
 
 Instantiating Generation Pipeline
diff --git a/docs/notebooks/265-wuerstchen-image-generation-with-output.rst b/docs/notebooks/265-wuerstchen-image-generation-with-output.rst
index 665f73c13ba2a6..aaecd5e3e1805c 100644
--- a/docs/notebooks/265-wuerstchen-image-generation-with-output.rst
+++ b/docs/notebooks/265-wuerstchen-image-generation-with-output.rst
@@ -58,7 +58,14 @@ Prerequisites
 
 .. code:: ipython3
 
-    %pip install -q  "diffusers>=0.21.0" transformers accelerate matplotlib gradio "openvino>=2023.2.0" --extra-index-url https://download.pytorch.org/whl/cpu
+    import platform
+
+    if platform.system() != "Windows":
+        %pip install -q "matplotlib>=3.4"
+    else:
+        %pip install -q "matplotlib>=3.4,<3.7"
+
+    %pip install -q  "diffusers>=0.21.0" transformers accelerate gradio "openvino>=2023.2.0" "peft==0.6.2" --extra-index-url https://download.pytorch.org/whl/cpu
     %pip install -q datasets "nncf>=2.7.0"
 
 
diff --git a/docs/notebooks/266-speculative-sampling-with-output.rst b/docs/notebooks/266-speculative-sampling-with-output.rst
index ba55e6027f55bc..d3ffc46f7871dd 100644
--- a/docs/notebooks/266-speculative-sampling-with-output.rst
+++ b/docs/notebooks/266-speculative-sampling-with-output.rst
@@ -72,7 +72,7 @@ useful modules.
 .. code:: ipython3
 
     %pip install -q --upgrade pip
-    %pip install -q --upgrade transformers torch gradio openvino accelerate onnx ipywidgets --extra-index-url https://download.pytorch.org/whl/cpu
+    %pip install -q --upgrade transformers torch gradio openvino accelerate onnx ipywidgets "peft==0.6.2" --extra-index-url https://download.pytorch.org/whl/cpu
     %pip install -q "git+https://github.com/huggingface/optimum-intel.git"
 
 Select inference device
diff --git a/docs/notebooks/267-distil-whisper-asr-with-output.rst b/docs/notebooks/267-distil-whisper-asr-with-output.rst
index 7d870edca33a46..597b9b3ced069a 100644
--- a/docs/notebooks/267-distil-whisper-asr-with-output.rst
+++ b/docs/notebooks/267-distil-whisper-asr-with-output.rst
@@ -74,7 +74,7 @@ Prerequisites
 
 .. code:: ipython3
 
-    %pip install -q "transformers>=4.35" onnx "git+https://github.com/huggingface/optimum-intel.git" --extra-index-url https://download.pytorch.org/whl/cpu
+    %pip install -q "transformers>=4.35" onnx "git+https://github.com/huggingface/optimum-intel.git" "peft==0.6.2" --extra-index-url https://download.pytorch.org/whl/cpu
     %pip install -q "openvino>=2023.2.0" datasets  "gradio>=4.0" "librosa" "soundfile"
     %pip install -q "nncf>=2.6.0" "jiwer"
 
@@ -105,7 +105,7 @@ using tokenizer.
 .. code:: ipython3
 
     import ipywidgets as widgets
-
+    
     model_ids = {
         "Distil-Whisper": [
             "distil-whisper/distil-large-v2",
@@ -126,14 +126,14 @@ using tokenizer.
             "openai/whisper-tiny.en",
         ]
     }
-
+    
     model_type = widgets.Dropdown(
         options=model_ids.keys(),
         value="Distil-Whisper",
         description="Model type:",
         disabled=False,
     )
-
+    
     model_type
 
 .. code:: ipython3
@@ -144,15 +144,15 @@ using tokenizer.
         description="Model:",
         disabled=False,
     )
-
+    
     model_id
 
 .. code:: ipython3
 
     from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq
-
+    
     processor = AutoProcessor.from_pretrained(model_id.value)
-
+    
     pt_model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id.value)
     pt_model.eval();
 
@@ -175,7 +175,7 @@ by Hugging Face datasets implementation.
 .. code:: ipython3
 
     from datasets import load_dataset
-
+    
     def extract_input_features(sample):
         input_features = processor(
             sample["audio"]["array"],
@@ -183,7 +183,7 @@ by Hugging Face datasets implementation.
             return_tensors="pt",
         ).input_features
         return input_features
-
+    
     dataset = load_dataset(
         "hf-internal-testing/librispeech_asr_dummy", "clean", split="validation"
     )
@@ -202,10 +202,10 @@ for decoding predicted token_ids into text transcription.
 .. code:: ipython3
 
     import IPython.display as ipd
-
+    
     predicted_ids = pt_model.generate(input_features)
     transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
-
+    
     display(ipd.Audio(sample["audio"]["array"], rate=sample["audio"]["sampling_rate"]))
     print(f"Reference: {sample['text']}")
     print(f"Result: {transcription[0]}")
@@ -214,7 +214,7 @@ for decoding predicted token_ids into text transcription.
 
 .. raw:: html
 
-
+