thinc-release-v9.1.1/.github/ISSUE_TEMPLATE/01_bugs.md
---
name: "\U0001F6A8 Submit a Bug Report"
about: Use this template if you come across a bug or unexpected behaviour that differs from the docs.
---
## How to reproduce the behaviour
## Your Environment
- Operating System:
- Python Version Used:
- Thinc Version Used:
- Environment Information:
thinc-release-v9.1.1/.github/PULL_REQUEST_TEMPLATE.md
## Description
### Types of change
## Checklist
- [ ] I confirm that I have the right to submit this contribution under the project's MIT license.
- [ ] I ran the tests, and all new and existing tests passed.
- [ ] My changes don't require a change to the documentation, or if they do, I've added all required information.
thinc-release-v9.1.1/.github/workflows/cibuildwheel.yml
name: Build
on:
push:
tags:
# GitHub uses its own glob-like filter syntax here, not regex:
# ** matches 'zero or more of any character'
- 'release-v[0-9]+.[0-9]+.[0-9]+**'
- 'prerelease-v[0-9]+.[0-9]+.[0-9]+**'
jobs:
build_wheels:
name: Build wheels on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
# macos-13 is an intel runner, macos-14 is apple silicon
os: [ubuntu-latest, windows-latest, macos-13, macos-14]
steps:
- uses: actions/checkout@v4
# This is way too slow
# aarch64 (arm) is built via qemu emulation on Linux
#- name: Set up QEMU
# if: runner.os == 'Linux'
# uses: docker/setup-qemu-action@v3
# with:
# platforms: all
- name: Build wheels
uses: pypa/cibuildwheel@v2.19.1
env:
CIBW_ARCHS_LINUX: auto
with:
package-dir: .
output-dir: wheelhouse
config-file: "{package}/pyproject.toml"
- uses: actions/upload-artifact@v4
with:
name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }}
path: ./wheelhouse/*.whl
build_sdist:
name: Build source distribution
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Build sdist
run: pipx run build --sdist
- uses: actions/upload-artifact@v4
with:
name: cibw-sdist
path: dist/*.tar.gz
create_release:
needs: [build_wheels, build_sdist]
runs-on: ubuntu-latest
permissions:
contents: write
checks: write
actions: read
issues: read
packages: write
pull-requests: read
repository-projects: read
statuses: read
steps:
- name: Get the tag name and determine if it's a prerelease
id: get_tag_info
run: |
FULL_TAG=${GITHUB_REF#refs/tags/}
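          # e.g. refs/tags/release-v9.1.1 -> release-v9.1.1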
if [[ $FULL_TAG == release-* ]]; then
TAG_NAME=${FULL_TAG#release-}
IS_PRERELEASE=false
elif [[ $FULL_TAG == prerelease-* ]]; then
TAG_NAME=${FULL_TAG#prerelease-}
IS_PRERELEASE=true
else
echo "Tag does not match expected patterns" >&2
exit 1
fi
echo "FULL_TAG=$FULL_TAG" >> $GITHUB_ENV
echo "TAG_NAME=$TAG_NAME" >> $GITHUB_ENV
echo "IS_PRERELEASE=$IS_PRERELEASE" >> $GITHUB_ENV
- uses: actions/download-artifact@v4
with:
# unpacks all CIBW artifacts into dist/
pattern: cibw-*
path: dist
merge-multiple: true
- name: Create Draft Release
id: create_release
uses: softprops/action-gh-release@v2
if: startsWith(github.ref, 'refs/tags/')
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
name: ${{ env.TAG_NAME }}
draft: true
prerelease: ${{ env.IS_PRERELEASE }}
files: "./dist/*"
thinc-release-v9.1.1/.github/workflows/explosionbot.yml
name: Explosion Bot
on:
issue_comment:
types:
- created
- edited
jobs:
explosion-bot:
runs-on: ubuntu-latest
steps:
- name: Dump GitHub context
env:
GITHUB_CONTEXT: ${{ toJson(github) }}
run: echo "$GITHUB_CONTEXT"
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
- name: Install and run explosion-bot
run: |
pip install git+https://${{ secrets.EXPLOSIONBOT_TOKEN }}@github.com/explosion/explosion-bot
python -m explosionbot
env:
INPUT_TOKEN: ${{ secrets.EXPLOSIONBOT_TOKEN }}
INPUT_BK_TOKEN: ${{ secrets.BUILDKITE_SECRET }}
ENABLED_COMMANDS: "test_gpu,test_slow,test_slow_gpu"
ALLOWED_TEAMS: "spacy-maintainers"
thinc-release-v9.1.1/.github/workflows/issue-manager.yml
name: Issue Manager
on:
schedule:
- cron: "0 0 * * *"
issue_comment:
types:
- created
- edited
issues:
types:
- labeled
jobs:
issue-manager:
runs-on: ubuntu-latest
steps:
- uses: tiangolo/issue-manager@0.4.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
config: >
{
"resolved": {
"delay": "P7D",
"message": "This issue has been automatically closed because it was answered and there was no follow-up discussion.",
"remove_label_on_comment": true,
"remove_label_on_close": true
}
}
thinc-release-v9.1.1/.github/workflows/publish_pypi.yml
# The cibuildwheel workflow runs on tag push and creates a draft release;
# this workflow triggers on publication of that release.
# The expected workflow is to create a draft release and let the wheels
# upload, and then hit 'publish', which uploads to PyPI.
on:
release:
types:
- published
jobs:
upload_pypi:
runs-on: ubuntu-latest
environment:
name: pypi
url: https://pypi.org/p/thinc
permissions:
id-token: write
contents: read
if: github.event_name == 'release' && github.event.action == 'published'
# or, alternatively, upload to PyPI on every tag starting with 'v' (remove on: release above to use this)
# if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v')
steps:
- uses: robinraju/release-downloader@v1
with:
tag: ${{ github.event.release.tag_name }}
fileName: '*'
out-file-path: 'dist'
- uses: pypa/gh-action-pypi-publish@release/v1
thinc-release-v9.1.1/.github/workflows/tests.yml
name: tests
on:
push:
paths-ignore:
- "website/**"
- "*.md"
pull_request:
types: [opened, synchronize, reopened, edited]
paths-ignore:
- "website/**"
- "*.md"
jobs:
validate:
name: Validate
if: github.repository_owner == 'explosion'
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@v4
- name: Configure Python version
uses: actions/setup-python@v5
with:
python-version: "3.9"
- name: black
run: |
python -m pip install black -c requirements.txt
python -m black thinc --check
- name: isort
run: |
python -m pip install isort -c requirements.txt
python -m isort thinc --check
- name: flake8
run: |
python -m pip install flake8==5.0.4
python -m flake8 thinc --count --select=E901,E999,F821,F822,F823,W605 --show-source --statistics
tests:
name: Test
needs: validate
if: github.repository_owner == 'explosion'
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest, macos-13]
python_version: ["3.12"]
include:
- os: windows-latest
python_version: "3.9"
- os: macos-13
python_version: "3.10"
- os: ubuntu-latest
python_version: "3.11"
runs-on: ${{ matrix.os }}
env:
NOTEBOOK_KERNEL: "thinc-notebook-tests"
steps:
- name: Check out repo
uses: actions/checkout@v4
- name: Configure Python version
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python_version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip setuptools wheel
pip install -r requirements.txt
- name: Build sdist
run: |
python setup.py build_ext --inplace
python setup.py sdist --formats=gztar
- name: Run mypy
run: python -m mypy thinc --no-implicit-reexport
- name: Delete source directory
run: rm -rf thinc
shell: bash
- name: Uninstall all packages
run: |
python -m pip freeze
pip freeze --exclude pywin32 > installed.txt
pip uninstall -y -r installed.txt
- name: Install from sdist
run: |
SDIST=$(python -c "import os;print(os.listdir('./dist')[-1])" 2>&1)
PIP_CONSTRAINT="build-constraints.txt" pip install dist/$SDIST
shell: bash
- name: Test import
run: python -c "import thinc"
- name: Install test requirements
run: |
pip install -r requirements.txt
- name: Install notebook test requirements
run: |
pip install ipykernel pydot graphviz
python -m ipykernel install --name thinc-notebook-tests --user
if: matrix.python_version != '3.12'
- name: Run tests without extras
run: |
python -m pytest --pyargs thinc -Werror --cov=thinc --cov-report=term
# TODO: Update for numpy v2
# Notes on numpy requirements hacks:
# 1. torch does not have a direct numpy requirement but is compiled
# against a newer version than the oldest supported numpy for windows and
# python 3.10; this version of numpy would not work with
# tensorflow~=2.5.0 as specified above, but there is no release for
# python 3.10 anyway
# 2. restrict to numpy<1.24.0 due to mxnet incompatibility
# 3. forbid torch!=1.13.0 due to segfaults with numpy<1.24.0
# Note: some of these pip install commands are known to fail for some platforms.
# To continue despite errors as in azure pipelines, remove -e from the default
# bash flags.
#- name: Install extras for testing
# run: |
# #pip install "protobuf~=3.20.0" "tensorflow~=2.5.0"
# #pip install "mxnet; sys_platform != 'win32' and python_version < '3.12'"
# pip install "torch!=1.13.0; sys_platform!='darwin'" --extra-index-url https://download.pytorch.org/whl/cpu
# # there is a bug related to MPS devices in github macos runners that
# # will be fixed in torch v2.1.1
# # https://github.com/pytorch/pytorch/pull/111576
# pip install "torch>=2.1.1; sys_platform=='darwin'" --extra-index-url https://download.pytorch.org/whl/cpu
# #pip install "numpy~=1.23.0; python_version=='3.10' and sys_platform=='win32'"
# #pip install "numpy<1.24.0"
# pip install -r requirements.txt
# pip uninstall -y mypy
# shell: bash --noprofile --norc -o pipefail {0}
#- name: Run tests with extras
# run: python -m pytest --pyargs thinc --cov=thinc --cov-report=term -p thinc.tests.enable_tensorflow
thinc-release-v9.1.1/.gitignore
# File extensions
*.pdf
*.aux
*.orig
*.pyo
*.pickle
*.dvi
*.o
*.sqlite
.*.s*
*.dat
# Varia
_paths.py
.mypy_cache
.hypothesis/
version.cc
version.pl
# Tests
tests/model/*
tests/model/
# Website
website/.cache/
website/public/
website/node_modules
website/.npm
website/logs
*.log
npm-debug.log*
website/www/
website/_deploy.sh
*.html
# Cython / C extensions
cythonize.json
*.cpp
*.so
*.so.1
# Vim / VSCode / editors
*.swp
*.swo
*.sw*
Profile.prof
.vscode
.sass-cache
# Python
.Python
.python-version
__pycache__/
.pytest_cache
*.py[cod]
.env/
.env*
.~env/
.venv
env3.*/
venv/
.dev
.denv
.pypyenv
.pytest_cache/
# Distribution / packaging
env/
build/
develop-eggs/
dist/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
pip-wheel-metadata/
Pipfile.lock
.installed.cfg
*.egg
.eggs
MANIFEST
# Temporary files
*.~*
tmp/
predict
train
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# Rope
.ropeproject
# Django stuff:
*.log
*.pot
# Windows
*.bat
Thumbs.db
Desktop.ini
# Mac OS X
*.DS_Store
# Komodo project files
*.komodoproject
# Other
*.tgz
# Pycharm project files
*.idea
# IPython
.ipynb_checkpoints/
thinc-release-v9.1.1/CITATION.cff
cff-version: 1.2.0
message: If you use Thinc in research, please use this as a citation.
title: Thinc
abstract: "🔮 A refreshing functional take on deep learning, compatible with your favorite libraries"
authors:
- family-names: "Honnibal"
given-names: "Matthew"
- family-names: "Montani"
given-names: "Ines"
- family-names: "Van Landeghem"
given-names: "Sofie"
- family-names: "Boyd"
given-names: "Adriane"
- family-names: "DuJardin"
given-names: "Justin"
version: 8.0.0
date-released: "2021-01-21"
license: MIT
repository-code: "https://github.com/explosion/thinc"
thinc-release-v9.1.1/LICENSE
The MIT License (MIT)
Copyright (C) 2016 ExplosionAI GmbH, 2016 spaCy GmbH, 2015 Matthew Honnibal
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
thinc-release-v9.1.1/MANIFEST.in
recursive-include thinc *.cu *.pyx *.pxd *.hh
include LICENSE
include README.md
prune tmp/
include thinc/tests/mypy/configs/*.ini
include thinc/tests/mypy/outputs/*.txt
include thinc/py.typed
recursive-exclude thinc *.cpp
thinc-release-v9.1.1/README.md
# Thinc: A refreshing functional take on deep learning, compatible with your favorite libraries
### From the makers of [spaCy](https://spacy.io) and [Prodigy](https://prodi.gy)
[Thinc](https://thinc.ai) is a **lightweight deep learning library** that offers
an elegant, type-checked, functional-programming API for **composing models**,
with support for layers defined in other frameworks such as **PyTorch,
TensorFlow and MXNet**. You can use Thinc as an interface layer, a standalone
toolkit or a flexible way to develop new models. Previous versions of Thinc have
been running quietly in production in thousands of companies, via both
[spaCy](https://spacy.io) and [Prodigy](https://prodi.gy). We wrote the new
version to let users **compose, configure and deploy custom models** built with
their favorite framework.
[](https://github.com/explosion/thinc/actions/workflows/tests.yml)
[](https://github.com/explosion/thinc/releases)
[](https://pypi.python.org/pypi/thinc)
[](https://anaconda.org/conda-forge/thinc)
[](https://github.com/explosion/wheelwright/releases)
[](https://github.com/ambv/black)
[![Open demo in Colab][colab]][intro_to_thinc_colab]
## 🔥 Features
- **Type-check** your model definitions with custom types and
[`mypy`](https://mypy.readthedocs.io/en/latest/) plugin.
- Wrap **PyTorch**, **TensorFlow** and **MXNet** models for use in your network.
- Concise **functional-programming** approach to model definition, using
composition rather than inheritance.
- Optional custom infix notation via **operator overloading**.
- Integrated **config system** to describe trees of objects and hyperparameters
  (see the sketch after this list).
- Choice of **extensible backends**.
- **[Read more →](https://thinc.ai/docs)**
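For instance, here's a minimal sketch of the config system in action, using the
same `Adam.v1` pattern as the [intro notebook](examples/00_intro_to_thinc.ipynb):

```python
# Describe an optimizer in a config string, then resolve it to a real object.
from thinc.api import Config, registry

config = Config().from_str("""
[optimizer]
@optimizers = "Adam.v1"
learn_rate = 0.001
""")
loaded = registry.resolve(config)
optimizer = loaded["optimizer"]  # a ready-to-use Adam optimizer
```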
## 🚀 Quickstart
Thinc is compatible with **Python 3.9+** and runs on **Linux**, **macOS** and
**Windows**. The latest releases with binary wheels are available from
[pip](https://pypi.python.org/pypi/thinc). Before you install Thinc and its
dependencies, make sure that your `pip`, `setuptools` and `wheel` are up to
date. For the most recent releases, pip 19.3 or newer is recommended.
```bash
pip install -U pip setuptools wheel
pip install thinc
```
See the [extended installation docs](https://thinc.ai/docs/install#extended) for
details on optional dependencies for different backends and GPU. You might also
want to
[set up static type checking](https://thinc.ai/docs/install#type-checking) to
take advantage of Thinc's type system.
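As a small illustration, here's a minimal sketch of a type-annotated model
definition that a type checker can verify (it assumes Thinc is installed;
`Floats2d` comes from `thinc.types`):

```python
# Annotate the composed model with Thinc's array types so the type
# checker can catch input/output mismatches at check time.
from thinc.api import Model, chain, Relu, Softmax
from thinc.types import Floats2d

model: Model[Floats2d, Floats2d] = chain(
    Relu(nO=32, dropout=0.2),
    Relu(nO=32, dropout=0.2),
    Softmax(),
)
```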
> ⚠️ If you have installed PyTorch and you are using Python 3.7+, uninstall the
> package `dataclasses` with `pip uninstall dataclasses`, since it may have been
> installed by PyTorch and is incompatible with Python 3.7+.
### 📓 Selected examples and notebooks
Also see the [`/examples`](examples) directory and
[usage documentation](https://thinc.ai/docs) for more examples. Most examples
are Jupyter notebooks – to launch them on
[Google Colab](https://colab.research.google.com) (with GPU support!) click on
the button next to the notebook name.
| Notebook | Description |
| --------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| [`intro_to_thinc`][intro_to_thinc] [![Open in Colab][colab]][intro_to_thinc_colab] | Everything you need to know to get started. Composing and training a model on the MNIST data, using config files, registering custom functions and wrapping PyTorch, TensorFlow and MXNet models. |
| [`transformers_tagger_bert`][transformers_tagger_bert] [![Open in Colab][colab]][transformers_tagger_bert_colab] | How to use Thinc, `transformers` and PyTorch to train a part-of-speech tagger. From model definition and config to the training loop. |
| [`pos_tagger_basic_cnn`][pos_tagger_basic_cnn] [![Open in Colab][colab]][pos_tagger_basic_cnn_colab] | Implementing and training a basic CNN part-of-speech tagging model without external dependencies, using different levels of Thinc's config system. |
| [`parallel_training_ray`][parallel_training_ray] [![Open in Colab][colab]][parallel_training_ray_colab] | How to set up synchronous and asynchronous parameter server training with Thinc and [Ray](https://ray.readthedocs.io/en/latest/). |
**[View more →](examples)**
[colab]:
https://gistcdn.githack.com/ines/dcf354aa71a7665ae19871d7fd14a4e0/raw/461fc1f61a7bc5860f943cd4b6bcfabb8c8906e7/colab-badge.svg
[intro_to_thinc]: examples/00_intro_to_thinc.ipynb
[intro_to_thinc_colab]:
https://colab.research.google.com/github/explosion/thinc/blob/master/examples/00_intro_to_thinc.ipynb
[transformers_tagger_bert]: examples/02_transformers_tagger_bert.ipynb
[transformers_tagger_bert_colab]:
https://colab.research.google.com/github/explosion/thinc/blob/master/examples/02_transformers_tagger_bert.ipynb
[pos_tagger_basic_cnn]: examples/03_pos_tagger_basic_cnn.ipynb
[pos_tagger_basic_cnn_colab]:
https://colab.research.google.com/github/explosion/thinc/blob/master/examples/03_pos_tagger_basic_cnn.ipynb
[parallel_training_ray]: examples/04_parallel_training_ray.ipynb
[parallel_training_ray_colab]:
https://colab.research.google.com/github/explosion/thinc/blob/master/examples/04_parallel_training_ray.ipynb
### 📖 Documentation & usage guides
| Documentation | Description |
| --------------------------------------------------------------------------------- | ----------------------------------------------------- |
| [Introduction](https://thinc.ai/docs) | Everything you need to know. |
| [Concept & Design](https://thinc.ai/docs/concept) | Thinc's conceptual model and how it works. |
| [Defining and using models](https://thinc.ai/docs/usage-models) | How to compose models and update state. |
| [Configuration system](https://thinc.ai/docs/usage-config) | Thinc's config system and function registry. |
| [Integrating PyTorch, TensorFlow & MXNet](https://thinc.ai/docs/usage-frameworks) | Interoperability with machine learning frameworks |
| [Layers API](https://thinc.ai/docs/api-layers) | Weights layers, transforms, combinators and wrappers. |
| [Type Checking](https://thinc.ai/docs/usage-type-checking) | Type-check your model definitions and more. |
## 🗺 What's where
| Module | Description |
| ----------------------------------------- | --------------------------------------------------------------------------------- |
| [`thinc.api`](thinc/api.py) | **User-facing API.** All classes and functions should be imported from here. |
| [`thinc.types`](thinc/types.py) | Custom [types and dataclasses](https://thinc.ai/docs/api-types). |
| [`thinc.model`](thinc/model.py) | The `Model` class. All Thinc models are an instance (not a subclass) of `Model`. |
| [`thinc.layers`](thinc/layers) | The layers. Each layer is implemented in its own module. |
| [`thinc.shims`](thinc/shims) | Interface for external models implemented in PyTorch, TensorFlow etc. |
| [`thinc.loss`](thinc/loss.py) | Functions to calculate losses. |
| [`thinc.optimizers`](thinc/optimizers.py) | Functions to create optimizers. Currently supports "vanilla" SGD, Adam and RAdam. |
| [`thinc.schedules`](thinc/schedules.py) | Generators for different rates, schedules, decays or series. |
| [`thinc.backends`](thinc/backends) | Backends for `numpy` and `cupy`. |
| [`thinc.config`](thinc/config.py) | Config parsing and validation and function registry system. |
| [`thinc.util`](thinc/util.py) | Utilities and helper functions. |
## 🐍 Development notes
Thinc uses [`black`](https://github.com/psf/black) for auto-formatting,
[`flake8`](http://flake8.pycqa.org/en/latest/) for linting and
[`mypy`](https://mypy.readthedocs.io/en/latest/) for type checking. All code is
written to be compatible with **Python 3.9+**, with type hints wherever possible. See
the [type reference](https://thinc.ai/docs/api-types) for more details on
Thinc's custom types.
### 👷‍♀️ Building Thinc from source
Building Thinc from source requires the full dependencies listed in
[`requirements.txt`](requirements.txt) to be installed. You'll also need a
compiler to build the C extensions.
```bash
git clone https://github.com/explosion/thinc
cd thinc
python -m venv .env
source .env/bin/activate
pip install -U pip setuptools wheel
pip install -r requirements.txt
pip install --no-build-isolation .
```
Alternatively, install in editable mode:
```bash
pip install -r requirements.txt
pip install --no-build-isolation --editable .
```
Or by setting `PYTHONPATH`:
```bash
export PYTHONPATH=`pwd`
pip install -r requirements.txt
python setup.py build_ext --inplace
```
### 🚦 Running tests
Thinc comes with an [extensive test suite](thinc/tests). The following should
all pass and not report any warnings or errors:
```bash
python -m pytest thinc # test suite
python -m mypy thinc # type checks
python -m flake8 thinc # linting
```
To view test coverage, you can run `python -m pytest thinc --cov=thinc`. We aim
for 100% test coverage. This doesn't mean that we meticulously write tests for
every single line – we ignore blocks that are not relevant or are difficult to
test and make sure that the tests execute all code paths.
thinc-release-v9.1.1/bin/push-tag.sh
#!/usr/bin/env bash
set -e
# Insist repository is clean
git diff-index --quiet HEAD
git checkout $1
git pull origin $1
git push origin $1
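# Extract the version string from thinc/about.py, then strip the
# "__version__ = " prefix and any surrounding quotes.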
version=$(grep "__version__ = " thinc/about.py)
version=${version/__version__ = }
version=${version/\'/}
version=${version/\'/}
version=${version/\"/}
version=${version/\"/}
git tag "v$version"
git push origin "v$version"
thinc-release-v9.1.1/build-constraints.txt
# build version constraints for use with wheelwright + multibuild
numpy>=2.0.0,<3.0.0; python_version>='3.9'
thinc-release-v9.1.1/examples/00_intro_to_thinc.ipynb
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "4q-j8ckEGcit"
},
"source": [
"# Intro to Thinc for beginners: defining a simple model and config & wrapping PyTorch, TensorFlow and MXNet\n",
"\n",
"This example shows how to get started with Thinc, using the \"hello world\" of neural network models: recognizing handwritten digits from the [MNIST dataset](http://yann.lecun.com/exdb/mnist/). For comparison, here's the same model implemented in other frameworks: [PyTorch version](https://github.com/pytorch/examples/blob/master/mnist/main.py), [TensorFlow version](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/mnist/mnist.py). In this notebook, we'll walk through **creating and training the model**, using **config files**, registering **custom functions** and **wrapping models** defined in PyTorch, TensorFlow and MXNet. This tutorial is aimed at beginners, but it assumes basic knowledge of machine learning concepts and terminology."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "FVWFT2nnGci4",
"outputId": "ee32dc55-5d02-473e-dc5b-2422dc1e34db"
},
"outputs": [],
"source": [
"!pip install \"thinc>=8.2.0\" \"ml_datasets>=0.2.0\" \"tqdm>=4.41\""
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "vUsjn12zberE"
},
"source": [
"There are also some optional extras to install, depending on whether you want to run this on GPU and on which of the integrations you want to test."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "l310yc2IawXM",
"scrolled": false
},
"outputs": [],
"source": [
"import thinc.util\n",
"# If you want to run this notebook on GPU, you'll need to install cupy.\n",
"if not thinc.util.has_cupy:\n",
" !pip install \"cupy-cuda101\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "a23GlJVJa0T-",
"scrolled": true
},
"outputs": [],
"source": [
"import thinc.util\n",
"# If you want to try out the tensorflow integration, you'll need to install that.\n",
"# You'll need to install either tensorflow or tensorflow-gpu, depending on your\n",
"# requirements.\n",
"if not thinc.util.has_tensorflow:\n",
" !pip install \"tensorflow-gpu>=2\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "PpL4s7hEbHhJ"
},
"outputs": [],
"source": [
"import thinc.util\n",
"# If you want to try out the PyTorch integration, you'll need to install it.\n",
"if not thinc.util.has_torch:\n",
" !pip install \"torch\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "LTOk4rfwbOSS"
},
"outputs": [],
"source": [
"import thinc.util\n",
"# If you want to try out the MxNet integration, you'll need to install it.\n",
"if not thinc.util.has_mxnet:\n",
" !pip install \"mxnet>=1.5.1,<1.6.0\""
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "GhyglLtPGci7"
},
"source": [
"If you're running the notebook on GPU, the first thing to do is use Thinc's `prefer_gpu` helper to make sure we're performing operations **on GPU if available**. The function should be called right after importing Thinc, and it returns a boolean indicating whether the GPU has been activated. If you want to test out an integration with another library, you should check that it can access the GPU too."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "2xkReqDJGci8",
"outputId": "9492ddc7-6c34-4f39-b781-e5df6ed06709"
},
"outputs": [],
"source": [
"from thinc.api import prefer_gpu\n",
"import thinc.util\n",
"print(\"Thinc GPU?\", prefer_gpu())\n",
"\n",
"if thinc.util.has_tensorflow:\n",
" import tensorflow as tf\n",
" print(\"Tensorflow GPU?\", bool(tf.config.experimental.list_physical_devices('GPU')))"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "BZVtDmLCGci8"
},
"source": [
"We’ve prepared a separate package [`ml-datasets`](https://github.com/explosion/ml-datasets) with loaders for some common datasets, including MNIST. So we can set up the data as follows:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "qctw-vbZGci_",
"outputId": "8f866627-0b2d-4392-eb61-7a7fc7c2e797"
},
"outputs": [],
"source": [
"import ml_datasets\n",
"(train_X, train_Y), (dev_X, dev_Y) = ml_datasets.mnist()\n",
"print(f\"Training size={len(train_X)}, dev size={len(dev_X)}\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "p8jv7snxGcjB"
},
"source": [
"Now let’s define a model with two **Relu-activated hidden layers**, followed by a **softmax-activated output layer**. We’ll also add **dropout** after the two hidden layers, to help the model generalize better. The `chain` combinator is like `Sequential` in PyTorch or Keras: it combines a list of layers together with a feed-forward relationship."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "hrhydeJSGcjB"
},
"outputs": [],
"source": [
"from thinc.api import chain, Relu, Softmax\n",
" \n",
"n_hidden = 32\n",
"dropout = 0.2\n",
"\n",
"model = chain(\n",
" Relu(nO=n_hidden, dropout=dropout), \n",
" Relu(nO=n_hidden, dropout=dropout), \n",
" Softmax()\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "gmIH7Xr5GcjD"
},
"source": [
"After creating the model, we can call the `Model.initialize` method, passing in a small batch of input data `X` and a small batch of output data `Y`. This allows Thinc to **infer the missing dimensions**: when we defined the model, we didn’t tell it the input size `nI` or the output size `nO`. When passing in the data, make sure it is on the right device by calling `model.ops.asarray` which will e.g. transform the arrays to `cupy` when running on GPU."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "ODzZCkuSGcjD",
"outputId": "b1c178db-b297-4cf7-e135-596dae2a7a75"
},
"outputs": [],
"source": [
"# making sure the data is on the right device\n",
"train_X = model.ops.asarray(train_X)\n",
"train_Y = model.ops.asarray(train_Y)\n",
"dev_X = model.ops.asarray(dev_X)\n",
"dev_Y = model.ops.asarray(dev_Y)\n",
"\n",
"model.initialize(X=train_X[:5], Y=train_Y[:5])\n",
"nI = model.get_dim(\"nI\")\n",
"nO = model.get_dim(\"nO\")\n",
"print(f\"Initialized model with input dimension nI={nI} and output dimension nO={nO}\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "th0djZu7GcjE"
},
"source": [
"Next we need to create an **optimizer**, and make several passes over the data, randomly selecting paired batches of the inputs and labels each time. While some machine learning libraries provide a single `.fit()` method to train a model all at once, Thinc puts you in charge of **shuffling and batching your data**, with the help of a few handy utility methods. `model.ops.xp` is either the `numpy` or the `cupy` module, depending on whether you run the code on CPU or GPU."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 219,
"referenced_widgets": [
"d9ba3c88398d4e12a674eca10a8633ff",
"2637352462d94fe795a409fbbbe70926",
"d266fcfaaa9741b1a4be560f094ac1d2",
"cd54b80e170c4a6c94fe419c2cf8bcd6",
"db2b8e98efcd4a7599b7bbd57c55e70c",
"71522233b5b64b3dbd29ec69e47788d8",
"81a8b3ff6d644f2c83f04a835bef5333",
"4c60aa21f05a4d678b2135d4d1f2c99a",
"7f3517a0116846708d65a947e534de05",
"2fa0fd59f8844a4e99eda19fc4151962",
"ca4f1e1dbbbd46618b40291f94ef1102",
"3ab377ee24544e2a95433448a8331ed1",
"5fe58d7f655243c8b9d29f31a0eb7b53",
"b1548f9f9f4642ef814e9d3565002501",
"7fcc9f928ddd4f52a9d420862b9be6ef",
"70898e759e6945a1a479dcb2d434b50d",
"6ae1111eff4043f294a7bbafcb5de189",
"0443d3d3c8564616a9cae2616c370a72",
"f3abedf22ff649b4b90e85fc68f93c67",
"d4f5f106a2ed488fbba2b626bbd72714",
"d904c7aebabb477a99f41e95f1b0b9cf",
"e82d78f382b343c384bb7895daa18c82",
"b9862082c1cf47d1bb6057537571499d",
"ebebf706cc9549d9803b796ea2ef2853",
"b50ff7a73c0f4a6da93443ca019ba785",
"0a9b4f19291b403dadeca3938004561e",
"e2a09b4280db464f863a1ae66d58ea02",
"7f60da0694d343fca576f9cfaf7352ac",
"5ac87cfe946d483b8088df90b22490d8",
"5d84a716b7ed4ad8927ee8ea8493e4db",
"ea6cbc2071af4c018f96e5846f328a1f",
"db3333497eba4ee1a63225b4c6bf468c",
"e3ddfbf3678a41ab9926976cf1beee85",
"a16d1171acfe48b5abfd3652d0cf119f",
"7ae0b9e9e6964641a7d529b03d60b034",
"4a9b8833512a4f07a4411b1e8c2ca0c8",
"05de1b15d6244f82bcef782c8f73f8a8",
"84fb3096f29f40bf9f4d20bca7608c05",
"aa1f805540a74c7380bd2b333b9ecc88",
"a0b0dd366b424f03adde1e0698b8b31f",
"86c4306d45c04ce783d40f202c30c350",
"2be93e1e76224f61afd058eea4583a05",
"6f8967c159b1474ab706026b6becd9a1",
"5c5c9d38d86c4d2486c547a3add4a04c",
"6da928e744da4d0bb1cbb1df33c201ee",
"017d16a6d32b4d6a9f868a57cd736568",
"eb2cfbd11882434e9373f36d62d06258",
"ceefe7af1738490d9414c6c690eb4681",
"fc6d6eef364446c3b54b68c5c4c1cb1b",
"662835d152984f4eb87fa7cef0c0e28d",
"1de0cf05f59b49eb8f8d233b9a4e490e",
"9bf763a02b58454ab1e4f9598939fd79",
"af32974b6ed54e9da7b89fe35a749640",
"aa7c50235a9f43fa96a7d9ef7be20861",
"407a6771394a4e69a504ea5b67164a64",
"e0077b7268614d85b4a08ad07183a2e4",
"1a394e179354494487365d1a5242e2d3",
"e567c933e82146758cb690a190f1199c",
"35181a9eb7bc4967984c327a8b6f7f8f",
"3cf4e5fcc4a64c13b100093fb12e2cd2",
"b826f8a9468d4c23b35f6021791885b6",
"016371ed2b1e43d29e7542dc20f9ef64",
"70537ee37fd645c8ba0d3e88fc43fdb5",
"6db4cb7968194992819296b341c378f1",
"ad58dbfc6a294f3f9dc89c4a3f9658dc",
"8f521dc0bd094a3da7cffd4da51ce7e0",
"0eea906841464f978a262640badc0fae",
"11daf0a19032495ab15cdda96b0030e7",
"6565004456e24a88a2e1e04ebe7004df",
"a63ede007e94496c884527478a9b0d17",
"e6b70306bfea4bd09f17eac179387a96",
"1e34b8465bc84f94b13a3d7a94d5f9ce",
"c0db04b51d0c4e9e9081c0169cca1f4e",
"cb35834c71e24301abd3574d82e017b9",
"80016285573540f998aeec24132c0bb3",
"f999eff152854b6da98defded43c8818",
"ebfa4d5418d84b3ea8fad62dfabce7c1",
"265c8c05122c406fa6b603070e2ca7bd",
"85e62f6e0211431a9f6513285ef1aa0b",
"92ae4ede68904e2f8b0b2322f099d32b"
]
},
"id": "jnqyIuNgGcjF",
"outputId": "5e4cecd4-8fd1-4608-ed8e-7ddfed1a7762"
},
"outputs": [],
"source": [
"from thinc.api import Adam, fix_random_seed\n",
"from tqdm.notebook import tqdm\n",
"\n",
"fix_random_seed(0)\n",
"optimizer = Adam(0.001)\n",
"batch_size = 128\n",
"print(\"Measuring performance across iterations:\")\n",
"\n",
"for i in range(10):\n",
" batches = model.ops.multibatch(batch_size, train_X, train_Y, shuffle=True)\n",
" for X, Y in tqdm(batches, leave=False):\n",
" Yh, backprop = model.begin_update(X)\n",
" backprop(Yh - Y)\n",
" model.finish_update(optimizer)\n",
" # Evaluate and print progress\n",
" correct = 0\n",
" total = 0\n",
" for X, Y in model.ops.multibatch(batch_size, dev_X, dev_Y):\n",
" Yh = model.predict(X)\n",
" correct += (Yh.argmax(axis=1) == Y.argmax(axis=1)).sum()\n",
" total += Yh.shape[0]\n",
" score = correct / total\n",
" print(f\" {i} {float(score):.3f}\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "FkcctgKzGcjG"
},
"source": [
"Let's wrap the training code in a function, so we can reuse it later:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "6VRxmeipGcjH"
},
"outputs": [],
"source": [
"def train_model(data, model, optimizer, n_iter, batch_size):\n",
" (train_X, train_Y), (dev_X, dev_Y) = data\n",
" indices = model.ops.xp.arange(train_X.shape[0], dtype=\"i\")\n",
" for i in range(n_iter):\n",
" batches = model.ops.multibatch(batch_size, train_X, train_Y, shuffle=True)\n",
" for X, Y in tqdm(batches, leave=False):\n",
" Yh, backprop = model.begin_update(X)\n",
" backprop(Yh - Y)\n",
" model.finish_update(optimizer)\n",
" # Evaluate and print progress\n",
" correct = 0\n",
" total = 0\n",
" for X, Y in model.ops.multibatch(batch_size, dev_X, dev_Y):\n",
" Yh = model.predict(X)\n",
" correct += (Yh.argmax(axis=1) == Y.argmax(axis=1)).sum()\n",
" total += Yh.shape[0]\n",
" score = correct / total\n",
" print(f\" {i} {float(score):.3f}\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "X6tmwMlsGcjH"
},
"source": [
"### Operator overloading for more concise model definitions\n",
"\n",
"Thinc allows you to **overload operators** and bind arbitrary functions to Python operators like `+`, `*`, but also `>>` or `@`. The `Model.define_operators` contextmanager takes a dict of operators mapped to functions – typically combinators like `chain`. The operators are only valid for the `with` block. This lets us define the model like this:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "eZlHX5j5GcjH"
},
"outputs": [],
"source": [
"from thinc.api import Model, chain, Relu, Softmax\n",
" \n",
"n_hidden = 32\n",
"dropout = 0.2\n",
"\n",
"with Model.define_operators({\">>\": chain}):\n",
" model = Relu(nO=n_hidden, dropout=dropout) >> Relu(nO=n_hidden, dropout=dropout) >> Softmax()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "liNMnvKzGcjH"
},
"source": [
"If your model definitions are very complex, mapping combinators to operators can help you keep the code readable and concise. You can find more examples of model definitions with overloaded operators [in the docs](https://thinc.ai/docs). (Also note that you don't _have to_ use this syntax!)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "HTFBXZxXGcjI"
},
"source": [
"---\n",
"\n",
"## Using config files\n",
"\n",
"Configuration is a huge problem for machine learning code, because you may want to expose almost any detail of any function as a hyperparameter. The setting you want to expose might be arbitrarily far down in your call stack. Default values also become hard to change without breaking backwards compatibility.\n",
"\n",
"To solve this problem, Thinc provides a config system that lets you easily describe **arbitrary trees of objects**. The objects can be created via function calls you register using a simple decorator syntax. The config can include values like hyperparameters or training settings (whatever you need), or references to functions and the values of their arguments. Thinc will then construct the config **bottom-up** – so you can define one function with its arguments, and then pass the return value into another function.\n",
"\n",
"> 💡 You can keep the config as a string in your Python script, or save it to a file like `config.cfg`. To load a config from a string, you can use `Config.from_str`. To load from a file, you can use `Config.from_disk`. The following examples all use strings so we can include them in the notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "BEvOL8kWGcjI",
"outputId": "b6d12471-0128-497c-e67a-1b10f2d1e43b"
},
"outputs": [],
"source": [
"from thinc.api import Config, registry\n",
"\n",
"EXAMPLE_CONFIG1 = \"\"\"\n",
"[hyper_params]\n",
"learn_rate = 0.001\n",
"\n",
"[optimizer]\n",
"@optimizers = \"Adam.v1\"\n",
"learn_rate = ${hyper_params:learn_rate}\n",
"\"\"\"\n",
"\n",
"config1 = Config().from_str(EXAMPLE_CONFIG1)\n",
"config1"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "olHiDguEGcjJ"
},
"source": [
"When you open the config with `Config.from_str`, Thinc will parse it as a dict and fill in the references to values defined in other sections. For example, `${hyper_params:learn_rate}` is substituted with `0.001`. \n",
"\n",
"Keys starting with `@` are references to **registered functions**. For example, `@optimizers = \"Adam.v1\"` refers to the function registered under the name `\"Adam.v1\"`, a function creating an Adam optimizer. The function takes one argument, the `learn_rate`. Calling `registry.resolve` will resolve the config and create the functions it defines."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "61kSjIhqGcjJ",
"outputId": "f5a28ea7-174f-465b-9847-e0286a07e910"
},
"outputs": [],
"source": [
"loaded_config1 = registry.resolve(config1)\n",
"loaded_config1"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "U7_Pfg62GcjJ"
},
"source": [
"If function arguments are missing or have incompatible types, Thinc will raise an error and tell you what's wrong. Configs can also define **nested blocks** using the `.` notation. In this example, `optimizer.learn_rate` defines the `learn_rate` argument of the `optimizer` block. Instead of a float, the learning rate can also be a generator – for instance, a linear warm-up rate:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "XqEL3yoVGcjJ",
"outputId": "007060a7-c387-4eab-e8be-0573069257fc"
},
"outputs": [],
"source": [
"EXAMPLE_CONFIG2 = \"\"\"\n",
"[optimizer]\n",
"@optimizers = \"Adam.v1\"\n",
"\n",
"[optimizer.learn_rate]\n",
"@schedules = \"warmup_linear.v1\"\n",
"initial_rate = 2e-5\n",
"warmup_steps = 1000\n",
"total_steps = 10000\n",
"\"\"\"\n",
"\n",
"config2 = Config().from_str(EXAMPLE_CONFIG2)\n",
"config2"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "_q6MvvG9GcjK"
},
"source": [
"Calling `registry.resolve` will now construct the objects bottom-up: first, it will create the schedule with the given arguments. Next, it will create the optimizer and pass in the schedule as the `learn_rate` argument."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "u9wjWVwLGcjK",
"outputId": "4bdbd58d-44e1-417a-9ee6-b6dc1925d5ef"
},
"outputs": [],
"source": [
"loaded_config2 = registry.resolve(config2)\n",
"loaded_config2"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "WRw-cbKVGcjL"
},
"source": [
"This gives you a loaded optimizer using the settings defined in the config, which you can then use in your script. How you set up your config and what you do with the result is **entirely up to you**. Thinc just gives you a dictionary of objects back and makes no assumptions about what they _\"mean\"_. This means that you can also choose the names of the config sections – the only things that need to stay consistent are the names of the function arguments."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "oKno-aCHGcjM"
},
"source": [
"### Configuring the MNIST model\n",
"\n",
"Here's a config describing the model we defined above. The values in the `hyper_params` section can be referenced in other sections to keep them consistent. The `*` is used for **positional arguments** – in this case, the arguments to the `chain` function, two Relu layers and one softmax layer."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "Gm762LNgGcjM",
"outputId": "dc6bc047-3b1f-4f82-edd5-ba6cc7fc7c26"
},
"outputs": [],
"source": [
"CONFIG = \"\"\"\n",
"[hyper_params]\n",
"n_hidden = 32\n",
"dropout = 0.2\n",
"learn_rate = 0.001\n",
"\n",
"[model]\n",
"@layers = \"chain.v1\"\n",
"\n",
"[model.*.relu1]\n",
"@layers = \"Relu.v1\"\n",
"nO = ${hyper_params:n_hidden}\n",
"dropout = ${hyper_params:dropout}\n",
"\n",
"[model.*.relu2]\n",
"@layers = \"Relu.v1\"\n",
"nO = ${hyper_params:n_hidden}\n",
"dropout = ${hyper_params:dropout}\n",
"\n",
"[model.*.softmax]\n",
"@layers = \"Softmax.v1\"\n",
"\n",
"[optimizer]\n",
"@optimizers = \"Adam.v1\"\n",
"learn_rate = ${hyper_params:learn_rate}\n",
"\n",
"[training]\n",
"n_iter = 10\n",
"batch_size = 128\n",
"\"\"\"\n",
"\n",
"config = Config().from_str(CONFIG)\n",
"config"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "z1KGVcQQGcjM",
"outputId": "960ee075-0ecc-4289-929b-61eb5ea93b38"
},
"outputs": [],
"source": [
"loaded_config = registry.resolve(config)\n",
"loaded_config"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "irXmLRk4GcjN"
},
"source": [
"When you call `registry.resolve`, Thinc will first create the three layers using the specified arguments populated by the hyperparameters. It will then pass the return values (the layer objects) to `chain`. It will also create an optimizer. All other values, like the training config, will be passed through as a regular dict. Your training code can now look like this:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 201,
"referenced_widgets": [
"a5ac2b15a8ae40ce92160a960414279c",
"b52a071acc34438d849dbc7adcc91631",
"43b0d557889248f287edd4edf50e3a9b",
"99c3aedf530c44d69f3c145efc7da6da",
"773cbd3194e142c694f73fd52ee6aa8c",
"7aebabcd7cd546e989eb0c148ebb1348",
"199cccb400aa4fc59a1f95317db0ae2b",
"9024d8c79164492e80170ca565f2e367",
"14717f9c7a3f4bbc97978023bad0d876",
"d7d3d6ec8a144738b61b59e8e1d37f1e",
"4f27062765bd41f589215a61e2d0840a",
"f5f4c5a71a9c41f88fafc9abea500f14",
"95f5d01ae2524dd2bcfab4945816b6a4",
"3edb84e7ef724a1887df6dcf31f75448",
"c918030a689a4842b727d9b7ecbe39ae",
"eac2608b4c0e4ad9ac81838ad7d68657",
"21f9b00564ea47ffa4523dd9a8eec4e9",
"6f2523ef7d0243b38975e5b657b43dd9",
"34c8794ff06d4875aec3ecaacb7f11bd",
"05eaa0a6003243bab5378655193332c0",
"31ef257d727346478de228f6cbd27021",
"4a97428d8e9944d5b2a00466e6a80c53",
"2e0751da78e14c88bfaf8b45f30fcd05",
"4b433703db1a41baa3e7e9f45d72aa57",
"b668e892e9cb4571b173390e78d8d786",
"7e569ac0f5974f4aaed7cf2e953bc316",
"79574d891d35493da77efbbad6fff11f",
"583d96a84b3642f4b864c7ae7ab6f598",
"3563f3d4033b4ac7b43279cf13d8bdd2",
"989fb7b65c504a1aad8ec0f53e2e43c3",
"ee11cbb4ec3d4042b6c62536a477c10a",
"5978e50fc32f4fe1ab99fbfc42e87c89",
"3bbd206a74794ed4a5500b7bafe546ea",
"d6f5cf60a40c4825bfca5a960056d078",
"3161c8b70d9748c5a1400c0fe543d442",
"421fbeb9dd4c422999932f7dcd210da7",
"7326a18ab885419691c5bfc3666154bc",
"3a1091487eac40cdac4046173caaaaf3",
"0ed86bd7b55f49bcb9927be28a279a49",
"d64df8ba0ab04aad9e1c2c4551a0b7d9",
"565b64b2df2f4342bdb56ad34b468de2",
"08b6e41233db46a6911d22fe531832f5",
"6699fda35d5b495da6052e53f4e24a01",
"649359bad86d415094308180006a0e11",
"f82fd9ccee6b423c8bdbc830ba0f8a93",
"afe82cf54da249cb815dc3ece7bac260",
"c1a8a841a5124a6eb364863a2fc11447",
"a13d290a9ddf41deb2f258c617dfd2e3",
"110b5a48694c48059985490cae348dc6",
"4c61f8b9314c4bec8a0c3a5ecab3356d",
"c99185c8dfbe47aab9e29a90b92c63a0",
"4553ca36932d4a20b898d7fa7b23bcc3",
"0829771aa327491e8b7d05ef2b084175",
"1687134f713c4c039365c0a66d1aab82",
"27a1f038ed8a41dfb4d18175f80aab7a",
"b32999bf8b8148e98bbbb6ba1acb4548",
"000b5ea0ab444688bf195d6c810a9ee4",
"5c9d8559f7134842ab00b99d28bfd644",
"630b657a06384d7abd3b63bcf02c3a3e",
"ac609c059aac45bba0ea7ee76e057977",
"88a24010610d4a19a3fb81eddfcfaf52",
"f7dd4ce672014ce6ba3d7d50ff090c9e",
"fdd43b573d18496a9eec5f61632b33ed",
"318b1b7bb5064c5aabd39540bf2d7bf2",
"53065cc4a41d4652bfb986ae1d28ce9b",
"5cc3f44e4660481d920e9120fd0d254a",
"b7740ddd81034150bca1bf841f43af31",
"c330191809e44832a89abcc6c4a6fd5d",
"9e2a1ec730be42d38a3f04b967ed0b88",
"583c6058061240838ad63f2a8ddf9a00",
"e731309213c94e92be1a389d13a190f5",
"110f5508e00e4ec6b5c84877abc9c265",
"80538e2ccd7f490987a79d4c70c9571b",
"50b4abff394846eabd6f6fdcac8a7762",
"d17516de5c52450abfd79af7db04a282",
"584cadfb0a6c480f94aad4b9842c4d25",
"22a455a7b3f841a9b01bc83dee7717cc",
"3c80e2934eda4d19825a76c6400a3823",
"a991cedbd4b24c139430a06369ad527f",
"490a7c3043e24e378892816de13970eb"
]
},
"id": "1kjJ79orGcjN",
"outputId": "c62b4795-f5a3-4257-d695-871fa4af1d08"
},
"outputs": [],
"source": [
"model = loaded_config[\"model\"]\n",
"optimizer = loaded_config[\"optimizer\"]\n",
"n_iter = loaded_config[\"training\"][\"n_iter\"]\n",
"batch_size = loaded_config[\"training\"][\"batch_size\"]\n",
"\n",
"model.initialize(X=train_X[:5], Y=train_Y[:5])\n",
"train_model(((train_X, train_Y), (dev_X, dev_Y)), model, optimizer, n_iter, batch_size)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "vcH1GbRnGcjN"
},
"source": [
"If you want to change a hyperparameter or experiment with a different optimizer, all you need to change is the config. For each experiment you run, you can save a config and you'll be able to reproduce it later."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ml9jX5SNGcjO"
},
"source": [
"---\n",
"\n",
"## Programming via config vs. registering custom functions\n",
"\n",
"The config system is very powerful and lets you define complex relationships, including model definitions with levels of nested layers. However, it's not always a good idea to program entirely in your config – this just replaces one problem (messy and hard to maintain code) with another one (messy and hard to maintain configs). So ultimately, it's about finding the **best possible trade-off**.\n",
"\n",
"If you've written a layer or model definition you're happy with, you can use Thinc's function registry to register it and assign it a string name. Your function can take any arguments that can later be defined in the config. Adding **type hints** ensures that config settings will be **parsed and validated** before they're passed into the function, so you don't end up with incompatible settings and confusing failures later on. Here's the MNIST model, defined as a custom layer:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "xgWuX6ceGcjO"
},
"outputs": [],
"source": [
"import thinc\n",
"\n",
"@thinc.registry.layers(\"MNIST.v1\")\n",
"def create_mnist(nO: int, dropout: float):\n",
" return chain(\n",
" Relu(nO, dropout=dropout), \n",
" Relu(nO, dropout=dropout), \n",
" Softmax()\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "o5hWEjL8GcjQ"
},
"source": [
"In the config, we can now refer to it by name and set its arguments. This makes the config maintainable and compact, while still allowing you to change and record the hyperparameters."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "LsWrE-lyGcjR",
"outputId": "28a9411c-0cc0-401d-9c3a-a5fae0410bde"
},
"outputs": [],
"source": [
"CONFIG2 = \"\"\"\n",
"[model]\n",
"@layers = \"MNIST.v1\"\n",
"nO = 32\n",
"dropout = 0.2\n",
"\n",
"[optimizer]\n",
"@optimizers = \"Adam.v1\"\n",
"learn_rate = 0.001\n",
"\n",
"[training]\n",
"n_iter = 10\n",
"batch_size = 128\n",
"\"\"\"\n",
"\n",
"config = Config().from_str(CONFIG2)\n",
"config"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "jqQLLw-gGcjS",
"outputId": "8d30cbed-1a3e-4a6a-8893-cf353d15fa3c"
},
"outputs": [],
"source": [
"loaded_config = registry.resolve(config)\n",
"loaded_config"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "kqYF8mP8GcjS"
},
"source": [
"If you don't want to hard-code the dataset being used, you can also wrap it in a registry function. This lets you refer to it by name in the config, and makes it easy to swap it out. In your config, you can then load the data in its own section, or as a subsection of `training`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "vz3IxOetGcjS"
},
"outputs": [],
"source": [
"@thinc.registry.datasets(\"mnist_data.v1\")\n",
"def mnist():\n",
" return ml_datasets.mnist()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "suiQ2psrGcjT",
"outputId": "4e72a0dc-b33b-4c5e-90f1-1dfa8f311b65",
"scrolled": true
},
"outputs": [],
"source": [
"CONFIG3 = \"\"\"\n",
"[model]\n",
"@layers = \"MNIST.v1\"\n",
"nO = 32\n",
"dropout = 0.2\n",
"\n",
"[optimizer]\n",
"@optimizers = \"Adam.v1\"\n",
"learn_rate = 0.001\n",
"\n",
"[training]\n",
"n_iter = 10\n",
"batch_size = 128\n",
"\n",
"[training.data]\n",
"@datasets = \"mnist_data.v1\"\n",
"\"\"\"\n",
"\n",
"config = Config().from_str(CONFIG3)\n",
"loaded_config = registry.resolve(config)\n",
"loaded_config"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 201,
"referenced_widgets": [
"f160728d6cfc4cd380fafce5b253d2c7",
"b9db25806c4a410db0004f77a1acaee5",
"0b2d03b2d0f6447293576ff8e9ed9ea3",
"f839ef53446f4fa9834edc8da44f1327",
"cc7f3e43ff504effb6fcbd63bac5546f",
"dc4f419827134de0b96b9b020fa8a788",
"68b365d2d435430aae5af88ecdae39a5",
"0459c93679e54f9db4e7009420b8a612",
"99fd455414304cb5a2a08077c2182452",
"65729f9c7457426eb006da855388182b",
"449ba373a94641f2ac9078d8f98c3f1a",
"fa591816c3414db3a7dc1478649ac230",
"3b06657d55a44d2486a3d65af18286d6",
"14cf79b923404000b5eb2762d161ec78",
"d673cd4b0e384aee988f565ab73cb022",
"829ad0cbf2bb49d9bde04dcff1f9cb3c",
"47a3f9dad5e2486b98ddd34b7bcf1201",
"9bc1cd208dfd4a55bd52a4d0f399117c",
"a27c932db2c84a86a75a92af92a07f0b",
"8d73c0c46c944eb89e95a9efb0a891a6",
"a183401dedf140979852bfc28122d689",
"8b2e77ece9f14c37ad8efab38442acbb",
"cf424d3cddcb4e469f4c88d1df70dedb",
"2c303e0697ec4cca9e949207f89d7695",
"b3664609744546988f30152c49d02ac8",
"3e395e497d8f4f10812b2af65ccac13b",
"247a4869ae564aefa501053956083a6a",
"5ac7bb8fbf0048c492fc4f6c8b69d828",
"099e1066f01c4b1780cf20a0700414a5",
"2cdf06e67ced403795e3ba594fbf3c55",
"2c6a4ebe2c914b028062eb3651de99e3",
"fdd481739b6a4d64ac739965c621d3b8",
"97e1cf826ec54af79646bee305e23601",
"c8de4a1a7be449e68787a4dcdc94801d",
"1c2657c607454a0eb329ae8663665e82",
"1c4cfdd4f52a4db182cd08357b3d3abe",
"db4cab1a1150482c861e22d6e7b2be09",
"eaa7c96f7ee849948f10033289dfbd05",
"d23105e4cc4141b5937063dc03e9d565",
"ef3638ddf37547fe8359b8f4333dc2e8",
"6b3e7a3ba015416cada97bb79d4c8fb1",
"a780eb0b79ff40e9966a72a3c5a81eca",
"0a9276f6ae784e63a6daaecdd2bc7e64",
"0046f7bffaa6438d8cf31a0d558c4b46",
"fce1dcea8fd749569b29e353a086ad45",
"34d176a20c5e429a8852cb3e2d3bb507",
"0a3730284d6a4110bf5247d537a24262",
"efb84c9861764c61b09ad02f170d8530",
"d98b018eb67447eca644cd5fa56f2723",
"8ff0e222da2b47d99e8931080555cb7e",
"a887772c5344437aa896b3ed1bd5208f",
"734031c5bb6e4e7d9e30f1a15861924f",
"e5caa26da44e411f891a71b85c46c87a",
"e53b1041df93415c9852da6d8a05b7d3",
"34237141f8854d33af9223e75e390ba5",
"8a0e36efabb04f869ba2c080446525da",
"e28f8422f22b4ef29ec9f6cffe959ecd",
"03edea4af2724453bfbb6b8e2e74cda2",
"dd43258264da429a9aeb315f1715b7db",
"43d68f9905354442bad801fb61bfd02a",
"861a02913c8346f2a2c7c7c049364acd",
"4bccad99024f4d77a23b657d9dc7746d",
"591e3bae35404f68b24f607ece97a9d3",
"19b5d9601cb94873b3f550214d586577",
"dba0b00af12d410cb8dfd4be838b173a",
"b34c1730161e41c5bb901128ccb4ff65",
"bce5275ceea044c3ba0f1635b0657ac4",
"7048d397ed0647a19587c17deec944bb",
"caff75ab1b874f9da109674579faadf3",
"1017871f46b44c9cb3f976d1af607ffc",
"32272e5a9e194bed9ffe5f16a3ae1d61",
"a4823e3708aa4ccba43e3547a229095e",
"ab99a2cf8919454bb780cc24088f2b85",
"9c7baaf83e734b86b2234c735dca859d",
"fddd7fb9ef3f4535a97775548abbabd1",
"fa68877e48ab41a48a3c10dc3fe329d1",
"32ff8a0bfbcb44ebb0c80363f6115e07",
"3f0d72d9c4984f89a82862fcdd24643b",
"96e832df4dec40d9a97bd129ef55effa",
"3e448a3c76bc4fdfa57f82327411f2e2"
]
},
"id": "TYYXzyYNGcjT",
"outputId": "edaaf25e-5e9f-43a5-8a6a-04bd5b612317"
},
"outputs": [],
"source": [
"model = loaded_config[\"model\"]\n",
"optimizer = loaded_config[\"optimizer\"]\n",
"n_iter = loaded_config[\"training\"][\"n_iter\"]\n",
"batch_size = loaded_config[\"training\"][\"batch_size\"]\n",
"(train_X, train_Y), (dev_X, dev_Y) = loaded_config[\"training\"][\"data\"]\n",
"\n",
"# After loading the data from config, they might still need to be moved to the right device\n",
"train_X = model.ops.asarray(train_X)\n",
"train_Y = model.ops.asarray(train_Y)\n",
"dev_X = model.ops.asarray(dev_X)\n",
"dev_Y = model.ops.asarray(dev_Y)\n",
"\n",
"model.initialize(X=train_X[:5], Y=train_Y[:5])\n",
"train_model(((train_X, train_Y), (dev_X, dev_Y)), model, optimizer, n_iter, batch_size)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "46MPK6-XGcjT"
},
"source": [
"---\n",
"\n",
"## Wrapping TensorFlow, PyTorch and MXNet models\n",
"\n",
"The previous example showed how to define the model directly in Thinc, which is pretty straightforward. But you can also define your model using a **machine learning library of your choice** and wrap it as a Thinc model. This gives your layers a unified interface so you can easily mix and match them, and also lets you take advantage of the config system and type hints. Thinc currently ships with built-in wrappers for [PyTorch](https://pytorch.org), [TensorFlow](https://tensorflow.org) and [MXNet](https://mxnet.apache.org/)."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "S9fK_9mAGcjT"
},
"source": [
"### Wrapping TensorFlow models\n",
"\n",
"Here's the same model definition in TensorFlow: a `Sequential` layer (equivalent of Thinc's `chain`) with two Relu layers and dropout, and an output layer with a softmax activation. Thinc's `TensorFlowWrapper` wraps the model and turns it into a regular Thinc `Model`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "IkgBI5wbGcjU",
"outputId": "65179036-11ea-430e-e54a-3ee0d02dcdad"
},
"outputs": [],
"source": [
"from tensorflow.keras.layers import Dense, Dropout\n",
"from tensorflow.keras.models import Sequential\n",
"from thinc.api import enable_tensorflow, TensorFlowWrapper, Adam\n",
"enable_tensorflow()\n",
"\n",
"width = 32\n",
"nO = 10\n",
"nI = 784\n",
"dropout = 0.2\n",
"\n",
"tf_model = Sequential()\n",
"tf_model.add(Dense(width, activation=\"relu\", input_shape=(nI,)))\n",
"tf_model.add(Dropout(dropout))\n",
"tf_model.add(Dense(width, activation=\"relu\", input_shape=(nI,)))\n",
"tf_model.add(Dropout(dropout))\n",
"tf_model.add(Dense(nO, activation=\"softmax\"))\n",
"\n",
"wrapped_tf_model = TensorFlowWrapper(tf_model)\n",
"wrapped_tf_model"
]
},
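{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you need to reach the original Keras object again, e.g. to call `summary()`, it's still there behind the wrapper. A sketch, assuming the wrapper keeps the framework model on its first shim:\n",
"\n",
"```python\n",
"keras_model = wrapped_tf_model.shims[0]._model\n",
"keras_model.summary()\n",
"```"
]
},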
{
"cell_type": "markdown",
"metadata": {
"id": "FVG18Ew_GcjU"
},
"source": [
"You can now use the same training code to train the model:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 257,
"referenced_widgets": [
"7ed4511c19b24c7fadf283d962a612a2",
"7e44024663ce4bd4833c4dcdf2d63d14",
"4b2f4fa05e7a465eacf809489243b2a8",
"a3426fcb07904ca4be130a71b330c77a",
"df732c32eb1340a890c181eed7358452",
"e18cf1aef5834391ae7410dcb90f1c59",
"759a4d04fe92425ab0d6d8681d3ed0c6",
"8a25115e9fef433e90b70e1ff3bbe341",
"8b296000cd0446e2952bff859b785260",
"f6f53ccede894d5fa859543bcd5d07a1",
"26fa7101f97f4aed98a774c83e802bc9",
"3a4fc2bfed8549d980a4884715d9c650",
"a21851e9b7b24098b3fbbe6d9f1f8e9c",
"a73abf53972a4913bf2c50b0d341fc13",
"6d23bd6b099746028b353af12380d226",
"2233981c90214b7cae8d5bfc7410802b",
"4faac8b7bdd748a7b908d46ed058aa3a",
"995ac4803a534d0883a15702eae4fc50",
"8584d1e513e7496192af75d2cf4f84c0",
"943c772ee3fe43cea3d05cea3f7f12bb",
"74632c71eef34180a7dba0c22fd9df80",
"dbd113a86ad44f0b95261ed9f6e6c93c",
"054a82c3f2dc4de9864b83dcd1a2abd8",
"f4fd9c138d8c4192afdb4324afe46771",
"0d95eef69a7d42418cc79e4cf4d8a3cd",
"84ddb87806a94da8902e3da8a06aa7e5",
"61540f7e300545eebbd697227a3b3d7d",
"c9892de0df614a84b8ba7a6e259ffe09",
"a08bba98a4994866949e19f6104e6991",
"d74548d2826c45cba4e730b3a9638fd4",
"fac6ce9bd41f43f9821f45741359f5f4",
"86ea34c601f447f18eaaf30fd1711f77",
"8c639957ced245318db4865da9e0c1df",
"0cdbaa847609462aa3163c49db61a7cd",
"7b5bf62263a64441b7424aab153415be",
"370044cae56f42fd992e4c4ce3240f68",
"7eb7725598674b13b049a370bb505ea7",
"d1758fb53b404e3f9b8f317be75684b4",
"8dde5fd1fe4347c7bea1c51c47b14e5e",
"ca1968d9a9f7462e88c9a051cb7a6777",
"db1b1bff50974cd0823dcccecd2ae1c4",
"fad451dd3ae040a8aede364098400642",
"e47127013eaa48979dc86657839ba227",
"eb6a91e79a1b4998bc2751d3e572b05f",
"0b62d25712a2403ab488914a7109ab47",
"ff6267b3dd63479a8e260848a30709a8",
"dd52397ee21b49f999121fb8a62ea4bf",
"8404870254bf4dfa80ce1a11acaa5969",
"e9228023d62243569a1e2e06a1cf94b5",
"f08fd096dcb743faafc6dbc604b45989",
"c808822cdef941a3879bf1cc2b32720a",
"e4e73b9dcadd4bacb50d03152e7d81e8",
"c8967d63161647d4a9e778765d3e7ac2",
"570de138283348be9b7575b51df33eea",
"6b89100361a443bd85a01f8e47374c96",
"6485d6b9b194425f8eb28b779d7d6e9a",
"5bd005f55b724af4893bb2d0718d9408",
"2e5a7bb281d840e6a4522a4dc602fd80",
"4005b0b6131a400e927343af9a20e154",
"87a3fcd0fd8d4df1b885b1e106d085a8",
"493826515d934872b80d127fbf8e6a85",
"594aebcc9478429d9a5063327c9953bc",
"7411b7336cab4e8f92347df274e93876",
"7e91135b95964e35a11bd312983f31c3",
"ab756a05d67241baabcb4ee84b35efe3",
"9b16033c7b7a47568a0d078d708cf28d",
"de806eb1cca0447c92917881ec18d49a",
"00a9eda2268e47baaae96926bb7d8c51",
"64cc1e2f91a44b218a7824aedd373b31",
"e9c21ca7465449d3a8930234e9ed8dcf",
"b775d1f8fbae434185d52dafd207b592",
"0fc3d74e4d4a4ac8b057342c0a017394",
"02a62e897a334ff88aa5282286de0bab",
"2cbcffe4ddd8450db4b259d368516d9d",
"880dce5bac5148988119a4c0f7296c83",
"6790ba4beb8a4bae8f9a14f0cca3b5c9",
"6cba2abf076d483ba8847ccbcf537c52",
"c2fa2316330f4edc83a8e3e4de571cf3",
"f55d98fcc3524d94ba73b3192ee19281",
"21f755e5d9774eb3aab7604140054a6d"
]
},
"id": "mu3vRO95GcjU",
"outputId": "cfb89aaf-581a-4bc6-f13e-aa7d8990d81b"
},
"outputs": [],
"source": [
"data = ml_datasets.mnist()\n",
"optimizer = Adam(0.001)\n",
"wrapped_tf_model.initialize(X=train_X[:5], Y=train_Y[:5])\n",
"train_model(data, wrapped_tf_model, optimizer, n_iter=10, batch_size=128)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "YxiSA-iwGcjV"
},
"source": [
"### Wrapping PyTorch models\n",
"\n",
"Here's the PyTorch version. Thinc's `PyTorchWrapper` wraps the model and turns it into a regular Thinc `Model`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "rBiDnLGhGcjW",
"outputId": "a208e5b6-8dc9-460a-f98a-b3d14186c4dc",
"scrolled": true
},
"outputs": [],
"source": [
"import torch\n",
"import torch.nn\n",
"import torch.nn.functional as F\n",
"from thinc.api import PyTorchWrapper, Adam\n",
"\n",
"\n",
"width = 32\n",
"nO = 10\n",
"nI = 784\n",
"dropout = 0.2\n",
"\n",
"\n",
"class PyTorchModel(torch.nn.Module):\n",
" def __init__(self, width, nO, nI, dropout):\n",
" super(PyTorchModel, self).__init__()\n",
" self.dropout1 = torch.nn.Dropout2d(dropout)\n",
" self.dropout2 = torch.nn.Dropout2d(dropout)\n",
" self.fc1 = torch.nn.Linear(nI, width)\n",
" self.fc2 = torch.nn.Linear(width, nO)\n",
"\n",
" def forward(self, x):\n",
" x = F.relu(x)\n",
" x = self.dropout1(x)\n",
" x = self.fc1(x)\n",
" x = F.relu(x)\n",
" x = self.dropout2(x)\n",
" x = self.fc2(x)\n",
" output = F.log_softmax(x, dim=1)\n",
" return output\n",
" \n",
"wrapped_pt_model = PyTorchWrapper(PyTorchModel(width, nO, nI, dropout))\n",
"wrapped_pt_model"
]
},
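{
"cell_type": "markdown",
"metadata": {},
"source": [
"The wrapped model also supports Thinc's usual serialization helpers, since the shim knows how to save and load the underlying PyTorch weights. A minimal round-trip sketch:\n",
"\n",
"```python\n",
"model_bytes = wrapped_pt_model.to_bytes()\n",
"wrapped_pt_model.from_bytes(model_bytes)\n",
"```"
]
},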
{
"cell_type": "markdown",
"metadata": {
"id": "9DHValebGcjX"
},
"source": [
"You can now use the same training code to train the model:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 201,
"referenced_widgets": [
"9bce5f414ec043509c06f10840cc2d0f",
"47fd69df905945a09183f89126c665f0",
"50ded217cb2440249cd86a4dcc0e1b7a",
"e1229e07527b4a67bb7de71715ed10ca",
"987b752181564435ab5a7f8d838651f5",
"009991c634044a2c8dfb58aa7da342a6",
"5ee467d84bd24e5db0507a994273ea28",
"6bc78efbc98a4cb5ad04647f893e2f1a",
"973c950b92d14614a5df848d2a8c024e",
"a702297c0c7e40b08546a7669dfb6917",
"637ac772584f4f169d59c0964be8acfd",
"dd2ea7e5c59f4a3a807a7f8048eafb1e",
"fe0223b47b9346e7ad056b86841f91d9",
"e32b04ebb4454ca2848b799045c30320",
"608cfc45e4ec4579b75999043e62ab19",
"69f5f6ef84e44fc284dd9be06beaaf65",
"4381199cd6fd46e98175bc65f3283a07",
"f8c7cd8112274e26996a56ecae42ec3f",
"ad1659c9381a4afca67aac81d1470776",
"28f837093edd4e60bde32ec8d23b7497",
"f0c3740bcfb04fa6a12e1f1025aff42f",
"191e3c3e02b44eda9ebe4859325830e5",
"b7341f8df10a4e1ab8c3cbb475de0d82",
"de11572a1baa4efabaa5c1436af92f06",
"8619ba70e84f4bda9d5c966863d0d4e3",
"0c037dcc8bbb4f38977b3e516d3a2014",
"977ab80b593849389b224d18aefb6814",
"d62d3423df7f49d18b45bb87e9919ade",
"473ece24383049f6964082046f5a8cef",
"a859b046e9e84f518b8c4aa449fb75cc",
"50d48e02aa744ad8adbccce94148b060",
"b3f89680663b466f9dda719fa6843707",
"faa57a96c1e7416fa9a9a41b5bc6e308",
"b4979a909ae64f48888d2383285f4f68",
"732824b0216e44f7a88da251febff435",
"c5b2f89f03264317aa734883c164cabb",
"40b7a89564684301ae357cae6c517be4",
"b91537eaa9ba44f8952cb65696d236fe",
"79c6ecc409764fc78294fcbd9319c3c0",
"b31897c725c946e8a08a2ef0e80b6520",
"cb24ce5968194d92bf77463a59eea46b",
"5bfe581cc5db4002b38c89df6b245720",
"aa86d75f3dba49a481c2c15167387b99",
"e83ab25fcda94a90bdd58143c3624263",
"82076a20084549719b917b071d1d1b91",
"49bbc050955c4ddd931efc9a468faccb",
"ae7ec72f5d064a6a80433613be03f849",
"5eb4571729ea4b92857d3ba3c575721a",
"1cea7c77162d42b2a464aeb98f86d36d",
"66fc9d3aad8f478c8884e74990c9f2f9",
"15062403d526469e910691b9b59f06bd",
"aa3e1f1cf64a47af9aa0930b855c14bc",
"b875c88f7ae74f8391ac58fc31a3aabc",
"4c6f9216f6b14905bbfeffd81e14c973",
"550caabed61042e1b89dd8a826897b77",
"27ca20cabb94467492f29713fad334dd",
"70bb47f183e24e7c805d8b5e89ce1b3e",
"838598dc75fb4836b3f4d769d68de597",
"c2b02e00355041919382a3c23c0d6850",
"6e396b43955349b28a280ccb1d41c716",
"9f6277814410407fae19e3aee971c997",
"1480b195483d454c844c9e6a724b02b3",
"501c6677916d40f9afaea8f48be244fe",
"2e78f48b6298490885474f27bcbf2928",
"df901f8348654af087a63b43ec822b9e",
"dffdd7d2ddf24707a1b2fb0df00cf957",
"3fb34090b5b34d6fa8f039440e839fbd",
"46796020aa1145fc87c7f458ee41e14e",
"3b153a8becf5411c871221d482680caa",
"0701da5b35604eaebcecf87335ad324c",
"76c193b0729b4a57b1664a94163bb38a",
"877c9e4a3541425eac50c9250e5ff568",
"4e2c73f549d94202a34b4a1046d011e5",
"847aaa75aecc40f3bc9d67c37cf03362",
"8c10956546bd46cc94c322044b217a1a",
"cfa1762b9dd740f39d4dc3f198abdcc6",
"5edf3c8cd0f64046ac298d48a93ef32e",
"77325a33c7b6495e9301c771dbf935de",
"ce87dfd22b554e0a83b264913f7156c6",
"2eb23201ad354f3ba8a0720757d4d8bd"
]
},
"id": "OCXFDVIHGcjX",
"outputId": "8fad70f4-c323-46b0-de52-2d4b65aed7b1",
"scrolled": true
},
"outputs": [],
"source": [
"data = ml_datasets.mnist()\n",
"optimizer = Adam(0.001)\n",
"wrapped_pt_model.initialize(X=train_X[:5], Y=train_Y[:5])\n",
"train_model(data, wrapped_pt_model, optimizer, n_iter=10, batch_size=128)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "E91d8Wm3GcjX"
},
"source": [
"### Wrapping MXNet models\n",
"\n",
"Here's the MXNet version. Thinc's `MXNetWrapper` wraps the model and turns it into a regular Thinc `Model`.\n",
"\n",
"MXNet doesn't provide a `Softmax` layer but a `.softmax()` operation/method for prediction and it integrates an internal softmax during training. So to be able to integrate it with the rest of the components, you combine it with a `Softmax()` Thinc layer using the `chain` combinator. Make sure you `initialize()` the MXNet model *and* the Thinc model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "6KU34c-1GcjY",
"outputId": "5c5db8b7-9a5d-4652-f1c9-4efac0d0e1a2"
},
"outputs": [],
"source": [
"from mxnet.gluon.nn import Dense, Sequential, Dropout\n",
"from thinc.api import enable_mxnet, MXNetWrapper, chain, Softmax\n",
"import thinc.util\n",
"enable_mxnet()\n",
"\n",
"assert thinc.util.has_mxnet\n",
"\n",
"width = 32\n",
"nO = 10\n",
"nI = 784\n",
"dropout = 0.2\n",
"\n",
"mx_model = Sequential()\n",
"mx_model.add(Dense(width, activation=\"relu\"))\n",
"mx_model.add(Dropout(dropout))\n",
"mx_model.add(Dense(width, activation=\"relu\"))\n",
"mx_model.add(Dropout(dropout))\n",
"mx_model.add(Dense(nO))\n",
"mx_model.initialize()\n",
"wrapped_mx_model = chain(MXNetWrapper(mx_model), Softmax())\n",
"wrapped_mx_model"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "V9ySHavEGcjY"
},
"source": [
"And train it the same way:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 227
},
"id": "SeE6CPUKGcjY",
"outputId": "9616cc90-2164-4da5-d3fd-928c7d47b726"
},
"outputs": [],
"source": [
"data = ml_datasets.mnist()\n",
"optimizer = Adam(0.001)\n",
"wrapped_mx_model.initialize(X=train_X[:5], Y=train_Y[:5])\n",
"train_model(data, wrapped_mx_model, optimizer, n_iter=10, batch_size=128)"
]
},
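{
"cell_type": "markdown",
"metadata": {},
"source": [
"All three wrappers run on whatever device Thinc's current backend uses. Since this notebook is set up for a GPU runtime, you can ask Thinc to use the GPU before creating the models and fall back to the CPU otherwise. A sketch:\n",
"\n",
"```python\n",
"from thinc.api import prefer_gpu\n",
"\n",
"is_gpu = prefer_gpu()  # True if a GPU was found and activated\n",
"```"
]
},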
{
"cell_type": "markdown",
"metadata": {
"id": "v3uPO9ZYGcjY"
},
"source": [
"---\n",
"\n",
"## Documentation and resources\n",
"\n",
"- USAGE [Configuration files](https://thinc.ai/docs/usage-config)\n",
"- USAGE [Defining and using models](https://thinc.ai/docs/usage-models)\n",
"- USAGE [Using Thinc with PyTorch, TensorFlow & MXNet](https://thinc.ai/docs/usage-frameworks)\n",
"- API [Available layers and combinators](https://thinc.ai/docs/api-layers)\n",
"- API [`Config` and `registry`](https://thinc.ai/docs/api-config)\n",
"- API [`Model` class](https://thinc.ai/docs/api-model)"
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"name": "00_intro_to_thinc.ipynb",
"provenance": []
},
"file_extension": ".py",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
},
"mimetype": "text/x-python",
"name": "python",
"npconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": 3,
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"000b5ea0ab444688bf195d6c810a9ee4": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_630b657a06384d7abd3b63bcf02c3a3e",
"IPY_MODEL_ac609c059aac45bba0ea7ee76e057977"
],
"layout": "IPY_MODEL_5c9d8559f7134842ab00b99d28bfd644"
}
},
"0046f7bffaa6438d8cf31a0d558c4b46": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_efb84c9861764c61b09ad02f170d8530",
"placeholder": "",
"style": "IPY_MODEL_0a3730284d6a4110bf5247d537a24262",
"value": " 422/422 [00:01<00:00, 307.85it/s]"
}
},
"009991c634044a2c8dfb58aa7da342a6": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"00a9eda2268e47baaae96926bb7d8c51": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_0fc3d74e4d4a4ac8b057342c0a017394",
"placeholder": "",
"style": "IPY_MODEL_b775d1f8fbae434185d52dafd207b592",
"value": " 422/422 [00:04<00:00, 106.76it/s]"
}
},
"016371ed2b1e43d29e7542dc20f9ef64": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"017d16a6d32b4d6a9f868a57cd736568": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"02a62e897a334ff88aa5282286de0bab": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_880dce5bac5148988119a4c0f7296c83",
"IPY_MODEL_6790ba4beb8a4bae8f9a14f0cca3b5c9"
],
"layout": "IPY_MODEL_2cbcffe4ddd8450db4b259d368516d9d"
}
},
"03edea4af2724453bfbb6b8e2e74cda2": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"0443d3d3c8564616a9cae2616c370a72": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"0459c93679e54f9db4e7009420b8a612": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"054a82c3f2dc4de9864b83dcd1a2abd8": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"05de1b15d6244f82bcef782c8f73f8a8": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"05eaa0a6003243bab5378655193332c0": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_4b433703db1a41baa3e7e9f45d72aa57",
"placeholder": "",
"style": "IPY_MODEL_2e0751da78e14c88bfaf8b45f30fcd05",
"value": " 422/422 [00:01<00:00, 303.55it/s]"
}
},
"0701da5b35604eaebcecf87335ad324c": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"0829771aa327491e8b7d05ef2b084175": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"08b6e41233db46a6911d22fe531832f5": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"099e1066f01c4b1780cf20a0700414a5": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"0a3730284d6a4110bf5247d537a24262": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"0a9276f6ae784e63a6daaecdd2bc7e64": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_34d176a20c5e429a8852cb3e2d3bb507",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_fce1dcea8fd749569b29e353a086ad45",
"value": 422
}
},
"0a9b4f19291b403dadeca3938004561e": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"0b2d03b2d0f6447293576ff8e9ed9ea3": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_dc4f419827134de0b96b9b020fa8a788",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_cc7f3e43ff504effb6fcbd63bac5546f",
"value": 422
}
},
"0b62d25712a2403ab488914a7109ab47": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"0c037dcc8bbb4f38977b3e516d3a2014": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"0cdbaa847609462aa3163c49db61a7cd": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"0d95eef69a7d42418cc79e4cf4d8a3cd": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_61540f7e300545eebbd697227a3b3d7d",
"IPY_MODEL_c9892de0df614a84b8ba7a6e259ffe09"
],
"layout": "IPY_MODEL_84ddb87806a94da8902e3da8a06aa7e5"
}
},
"0ed86bd7b55f49bcb9927be28a279a49": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"0eea906841464f978a262640badc0fae": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_a63ede007e94496c884527478a9b0d17",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_6565004456e24a88a2e1e04ebe7004df",
"value": 422
}
},
"0fc3d74e4d4a4ac8b057342c0a017394": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"1017871f46b44c9cb3f976d1af607ffc": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"110b5a48694c48059985490cae348dc6": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_c99185c8dfbe47aab9e29a90b92c63a0",
"IPY_MODEL_4553ca36932d4a20b898d7fa7b23bcc3"
],
"layout": "IPY_MODEL_4c61f8b9314c4bec8a0c3a5ecab3356d"
}
},
"110f5508e00e4ec6b5c84877abc9c265": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"11daf0a19032495ab15cdda96b0030e7": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_1e34b8465bc84f94b13a3d7a94d5f9ce",
"placeholder": "",
"style": "IPY_MODEL_e6b70306bfea4bd09f17eac179387a96",
"value": " 422/422 [00:01<00:00, 311.51it/s]"
}
},
"14717f9c7a3f4bbc97978023bad0d876": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_4f27062765bd41f589215a61e2d0840a",
"IPY_MODEL_f5f4c5a71a9c41f88fafc9abea500f14"
],
"layout": "IPY_MODEL_d7d3d6ec8a144738b61b59e8e1d37f1e"
}
},
"1480b195483d454c844c9e6a724b02b3": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"14cf79b923404000b5eb2762d161ec78": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"15062403d526469e910691b9b59f06bd": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_4c6f9216f6b14905bbfeffd81e14c973",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_b875c88f7ae74f8391ac58fc31a3aabc",
"value": 422
}
},
"1687134f713c4c039365c0a66d1aab82": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"191e3c3e02b44eda9ebe4859325830e5": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"199cccb400aa4fc59a1f95317db0ae2b": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"19b5d9601cb94873b3f550214d586577": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"1a394e179354494487365d1a5242e2d3": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_35181a9eb7bc4967984c327a8b6f7f8f",
"IPY_MODEL_3cf4e5fcc4a64c13b100093fb12e2cd2"
],
"layout": "IPY_MODEL_e567c933e82146758cb690a190f1199c"
}
},
"1c2657c607454a0eb329ae8663665e82": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_eaa7c96f7ee849948f10033289dfbd05",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_db4cab1a1150482c861e22d6e7b2be09",
"value": 422
}
},
"1c4cfdd4f52a4db182cd08357b3d3abe": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_ef3638ddf37547fe8359b8f4333dc2e8",
"placeholder": "",
"style": "IPY_MODEL_d23105e4cc4141b5937063dc03e9d565",
"value": " 422/422 [00:01<00:00, 304.76it/s]"
}
},
"1cea7c77162d42b2a464aeb98f86d36d": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_15062403d526469e910691b9b59f06bd",
"IPY_MODEL_aa3e1f1cf64a47af9aa0930b855c14bc"
],
"layout": "IPY_MODEL_66fc9d3aad8f478c8884e74990c9f2f9"
}
},
"1de0cf05f59b49eb8f8d233b9a4e490e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_aa7c50235a9f43fa96a7d9ef7be20861",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_af32974b6ed54e9da7b89fe35a749640",
"value": 422
}
},
"1e34b8465bc84f94b13a3d7a94d5f9ce": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"21f755e5d9774eb3aab7604140054a6d": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"21f9b00564ea47ffa4523dd9a8eec4e9": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_34c8794ff06d4875aec3ecaacb7f11bd",
"IPY_MODEL_05eaa0a6003243bab5378655193332c0"
],
"layout": "IPY_MODEL_6f2523ef7d0243b38975e5b657b43dd9"
}
},
"2233981c90214b7cae8d5bfc7410802b": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"22a455a7b3f841a9b01bc83dee7717cc": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"247a4869ae564aefa501053956083a6a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_2cdf06e67ced403795e3ba594fbf3c55",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_099e1066f01c4b1780cf20a0700414a5",
"value": 422
}
},
"2637352462d94fe795a409fbbbe70926": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"265c8c05122c406fa6b603070e2ca7bd": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"26fa7101f97f4aed98a774c83e802bc9": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_a73abf53972a4913bf2c50b0d341fc13",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_a21851e9b7b24098b3fbbe6d9f1f8e9c",
"value": 422
}
},
"27a1f038ed8a41dfb4d18175f80aab7a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"27ca20cabb94467492f29713fad334dd": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"28f837093edd4e60bde32ec8d23b7497": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_de11572a1baa4efabaa5c1436af92f06",
"placeholder": "",
"style": "IPY_MODEL_b7341f8df10a4e1ab8c3cbb475de0d82",
"value": " 422/422 [00:01<00:00, 300.55it/s]"
}
},
"2be93e1e76224f61afd058eea4583a05": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"2c303e0697ec4cca9e949207f89d7695": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"2c6a4ebe2c914b028062eb3651de99e3": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"2cbcffe4ddd8450db4b259d368516d9d": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"2cdf06e67ced403795e3ba594fbf3c55": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"2e0751da78e14c88bfaf8b45f30fcd05": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"2e5a7bb281d840e6a4522a4dc602fd80": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"2e78f48b6298490885474f27bcbf2928": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"2eb23201ad354f3ba8a0720757d4d8bd": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"2fa0fd59f8844a4e99eda19fc4151962": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"3161c8b70d9748c5a1400c0fe543d442": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_3a1091487eac40cdac4046173caaaaf3",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_7326a18ab885419691c5bfc3666154bc",
"value": 422
}
},
"318b1b7bb5064c5aabd39540bf2d7bf2": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"31ef257d727346478de228f6cbd27021": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"32272e5a9e194bed9ffe5f16a3ae1d61": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"32ff8a0bfbcb44ebb0c80363f6115e07": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"34237141f8854d33af9223e75e390ba5": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"34c8794ff06d4875aec3ecaacb7f11bd": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_4a97428d8e9944d5b2a00466e6a80c53",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_31ef257d727346478de228f6cbd27021",
"value": 422
}
},
"34d176a20c5e429a8852cb3e2d3bb507": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"35181a9eb7bc4967984c327a8b6f7f8f": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_016371ed2b1e43d29e7542dc20f9ef64",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_b826f8a9468d4c23b35f6021791885b6",
"value": 422
}
},
"3563f3d4033b4ac7b43279cf13d8bdd2": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"370044cae56f42fd992e4c4ce3240f68": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_ca1968d9a9f7462e88c9a051cb7a6777",
"placeholder": "",
"style": "IPY_MODEL_8dde5fd1fe4347c7bea1c51c47b14e5e",
"value": " 422/422 [00:04<00:00, 95.92it/s]"
}
},
"3a1091487eac40cdac4046173caaaaf3": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"3a4fc2bfed8549d980a4884715d9c650": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_2233981c90214b7cae8d5bfc7410802b",
"placeholder": "",
"style": "IPY_MODEL_6d23bd6b099746028b353af12380d226",
"value": " 422/422 [00:04<00:00, 101.43it/s]"
}
},
"3ab377ee24544e2a95433448a8331ed1": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_70898e759e6945a1a479dcb2d434b50d",
"placeholder": "",
"style": "IPY_MODEL_7fcc9f928ddd4f52a9d420862b9be6ef",
"value": " 422/422 [00:01<00:00, 305.51it/s]"
}
},
"3b06657d55a44d2486a3d65af18286d6": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"3b153a8becf5411c871221d482680caa": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"3bbd206a74794ed4a5500b7bafe546ea": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_3161c8b70d9748c5a1400c0fe543d442",
"IPY_MODEL_421fbeb9dd4c422999932f7dcd210da7"
],
"layout": "IPY_MODEL_d6f5cf60a40c4825bfca5a960056d078"
}
},
"3c80e2934eda4d19825a76c6400a3823": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"3cf4e5fcc4a64c13b100093fb12e2cd2": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_6db4cb7968194992819296b341c378f1",
"placeholder": "",
"style": "IPY_MODEL_70537ee37fd645c8ba0d3e88fc43fdb5",
"value": " 422/422 [00:01<00:00, 310.58it/s]"
}
},
"3e395e497d8f4f10812b2af65ccac13b": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"3e448a3c76bc4fdfa57f82327411f2e2": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"3edb84e7ef724a1887df6dcf31f75448": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"3f0d72d9c4984f89a82862fcdd24643b": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"3fb34090b5b34d6fa8f039440e839fbd": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_0701da5b35604eaebcecf87335ad324c",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_3b153a8becf5411c871221d482680caa",
"value": 422
}
},
"4005b0b6131a400e927343af9a20e154": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_594aebcc9478429d9a5063327c9953bc",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_493826515d934872b80d127fbf8e6a85",
"value": 422
}
},
"407a6771394a4e69a504ea5b67164a64": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"40b7a89564684301ae357cae6c517be4": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"421fbeb9dd4c422999932f7dcd210da7": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_d64df8ba0ab04aad9e1c2c4551a0b7d9",
"placeholder": "",
"style": "IPY_MODEL_0ed86bd7b55f49bcb9927be28a279a49",
"value": " 422/422 [00:01<00:00, 301.60it/s]"
}
},
"4381199cd6fd46e98175bc65f3283a07": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_ad1659c9381a4afca67aac81d1470776",
"IPY_MODEL_28f837093edd4e60bde32ec8d23b7497"
],
"layout": "IPY_MODEL_f8c7cd8112274e26996a56ecae42ec3f"
}
},
"43b0d557889248f287edd4edf50e3a9b": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_7aebabcd7cd546e989eb0c148ebb1348",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_773cbd3194e142c694f73fd52ee6aa8c",
"value": 422
}
},
"43d68f9905354442bad801fb61bfd02a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_19b5d9601cb94873b3f550214d586577",
"placeholder": "",
"style": "IPY_MODEL_591e3bae35404f68b24f607ece97a9d3",
"value": " 422/422 [00:01<00:00, 304.08it/s]"
}
},
"449ba373a94641f2ac9078d8f98c3f1a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_14cf79b923404000b5eb2762d161ec78",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_3b06657d55a44d2486a3d65af18286d6",
"value": 422
}
},
"4553ca36932d4a20b898d7fa7b23bcc3": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_b32999bf8b8148e98bbbb6ba1acb4548",
"placeholder": "",
"style": "IPY_MODEL_27a1f038ed8a41dfb4d18175f80aab7a",
"value": " 422/422 [00:01<00:00, 308.41it/s]"
}
},
"46796020aa1145fc87c7f458ee41e14e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_877c9e4a3541425eac50c9250e5ff568",
"placeholder": "",
"style": "IPY_MODEL_76c193b0729b4a57b1664a94163bb38a",
"value": " 422/422 [00:01<00:00, 297.86it/s]"
}
},
"473ece24383049f6964082046f5a8cef": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"47a3f9dad5e2486b98ddd34b7bcf1201": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_a27c932db2c84a86a75a92af92a07f0b",
"IPY_MODEL_8d73c0c46c944eb89e95a9efb0a891a6"
],
"layout": "IPY_MODEL_9bc1cd208dfd4a55bd52a4d0f399117c"
}
},
"47fd69df905945a09183f89126c665f0": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"490a7c3043e24e378892816de13970eb": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"493826515d934872b80d127fbf8e6a85": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"49bbc050955c4ddd931efc9a468faccb": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"4a97428d8e9944d5b2a00466e6a80c53": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"4a9b8833512a4f07a4411b1e8c2ca0c8": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_a0b0dd366b424f03adde1e0698b8b31f",
"placeholder": "",
"style": "IPY_MODEL_aa1f805540a74c7380bd2b333b9ecc88",
"value": " 422/422 [00:01<00:00, 308.54it/s]"
}
},
"4b2f4fa05e7a465eacf809489243b2a8": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_e18cf1aef5834391ae7410dcb90f1c59",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_df732c32eb1340a890c181eed7358452",
"value": 422
}
},
"4b433703db1a41baa3e7e9f45d72aa57": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"4bccad99024f4d77a23b657d9dc7746d": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"4c60aa21f05a4d678b2135d4d1f2c99a": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"4c61f8b9314c4bec8a0c3a5ecab3356d": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"4c6f9216f6b14905bbfeffd81e14c973": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"4e2c73f549d94202a34b4a1046d011e5": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_8c10956546bd46cc94c322044b217a1a",
"IPY_MODEL_cfa1762b9dd740f39d4dc3f198abdcc6"
],
"layout": "IPY_MODEL_847aaa75aecc40f3bc9d67c37cf03362"
}
},
"4f27062765bd41f589215a61e2d0840a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_3edb84e7ef724a1887df6dcf31f75448",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_95f5d01ae2524dd2bcfab4945816b6a4",
"value": 422
}
},
"4faac8b7bdd748a7b908d46ed058aa3a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_8584d1e513e7496192af75d2cf4f84c0",
"IPY_MODEL_943c772ee3fe43cea3d05cea3f7f12bb"
],
"layout": "IPY_MODEL_995ac4803a534d0883a15702eae4fc50"
}
},
"501c6677916d40f9afaea8f48be244fe": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"50b4abff394846eabd6f6fdcac8a7762": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"50d48e02aa744ad8adbccce94148b060": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"50ded217cb2440249cd86a4dcc0e1b7a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_009991c634044a2c8dfb58aa7da342a6",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_987b752181564435ab5a7f8d838651f5",
"value": 422
}
},
"53065cc4a41d4652bfb986ae1d28ce9b": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_b7740ddd81034150bca1bf841f43af31",
"IPY_MODEL_c330191809e44832a89abcc6c4a6fd5d"
],
"layout": "IPY_MODEL_5cc3f44e4660481d920e9120fd0d254a"
}
},
"550caabed61042e1b89dd8a826897b77": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"565b64b2df2f4342bdb56ad34b468de2": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_6699fda35d5b495da6052e53f4e24a01",
"IPY_MODEL_649359bad86d415094308180006a0e11"
],
"layout": "IPY_MODEL_08b6e41233db46a6911d22fe531832f5"
}
},
"570de138283348be9b7575b51df33eea": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"583c6058061240838ad63f2a8ddf9a00": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"583d96a84b3642f4b864c7ae7ab6f598": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_5978e50fc32f4fe1ab99fbfc42e87c89",
"placeholder": "",
"style": "IPY_MODEL_ee11cbb4ec3d4042b6c62536a477c10a",
"value": " 422/422 [00:01<00:00, 314.30it/s]"
}
},
"584cadfb0a6c480f94aad4b9842c4d25": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_490a7c3043e24e378892816de13970eb",
"placeholder": "",
"style": "IPY_MODEL_a991cedbd4b24c139430a06369ad527f",
"value": " 422/422 [00:01<00:00, 310.32it/s]"
}
},
"591e3bae35404f68b24f607ece97a9d3": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"594aebcc9478429d9a5063327c9953bc": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"5978e50fc32f4fe1ab99fbfc42e87c89": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"5ac7bb8fbf0048c492fc4f6c8b69d828": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_fdd481739b6a4d64ac739965c621d3b8",
"placeholder": "",
"style": "IPY_MODEL_2c6a4ebe2c914b028062eb3651de99e3",
"value": " 422/422 [00:01<00:00, 311.10it/s]"
}
},
"5ac87cfe946d483b8088df90b22490d8": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"5bd005f55b724af4893bb2d0718d9408": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_4005b0b6131a400e927343af9a20e154",
"IPY_MODEL_87a3fcd0fd8d4df1b885b1e106d085a8"
],
"layout": "IPY_MODEL_2e5a7bb281d840e6a4522a4dc602fd80"
}
},
"5bfe581cc5db4002b38c89df6b245720": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"5c5c9d38d86c4d2486c547a3add4a04c": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_ceefe7af1738490d9414c6c690eb4681",
"placeholder": "",
"style": "IPY_MODEL_eb2cfbd11882434e9373f36d62d06258",
"value": " 422/422 [00:01<00:00, 307.38it/s]"
}
},
"5c9d8559f7134842ab00b99d28bfd644": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"5cc3f44e4660481d920e9120fd0d254a": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"5d84a716b7ed4ad8927ee8ea8493e4db": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"5eb4571729ea4b92857d3ba3c575721a": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"5edf3c8cd0f64046ac298d48a93ef32e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"5ee467d84bd24e5db0507a994273ea28": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"5fe58d7f655243c8b9d29f31a0eb7b53": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"608cfc45e4ec4579b75999043e62ab19": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"61540f7e300545eebbd697227a3b3d7d": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_d74548d2826c45cba4e730b3a9638fd4",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_a08bba98a4994866949e19f6104e6991",
"value": 422
}
},
"630b657a06384d7abd3b63bcf02c3a3e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_f7dd4ce672014ce6ba3d7d50ff090c9e",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_88a24010610d4a19a3fb81eddfcfaf52",
"value": 422
}
},
"637ac772584f4f169d59c0964be8acfd": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_e32b04ebb4454ca2848b799045c30320",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_fe0223b47b9346e7ad056b86841f91d9",
"value": 422
}
},
"6485d6b9b194425f8eb28b779d7d6e9a": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"649359bad86d415094308180006a0e11": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_a13d290a9ddf41deb2f258c617dfd2e3",
"placeholder": "",
"style": "IPY_MODEL_c1a8a841a5124a6eb364863a2fc11447",
"value": " 422/422 [00:01<00:00, 315.93it/s]"
}
},
"64cc1e2f91a44b218a7824aedd373b31": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"6565004456e24a88a2e1e04ebe7004df": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"65729f9c7457426eb006da855388182b": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"662835d152984f4eb87fa7cef0c0e28d": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"6699fda35d5b495da6052e53f4e24a01": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_afe82cf54da249cb815dc3ece7bac260",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_f82fd9ccee6b423c8bdbc830ba0f8a93",
"value": 422
}
},
"66fc9d3aad8f478c8884e74990c9f2f9": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"6790ba4beb8a4bae8f9a14f0cca3b5c9": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_21f755e5d9774eb3aab7604140054a6d",
"placeholder": "",
"style": "IPY_MODEL_f55d98fcc3524d94ba73b3192ee19281",
"value": " 422/422 [00:04<00:00, 104.34it/s]"
}
},
"68b365d2d435430aae5af88ecdae39a5": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"69f5f6ef84e44fc284dd9be06beaaf65": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"6ae1111eff4043f294a7bbafcb5de189": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_f3abedf22ff649b4b90e85fc68f93c67",
"IPY_MODEL_d4f5f106a2ed488fbba2b626bbd72714"
],
"layout": "IPY_MODEL_0443d3d3c8564616a9cae2616c370a72"
}
},
"6b3e7a3ba015416cada97bb79d4c8fb1": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_0a9276f6ae784e63a6daaecdd2bc7e64",
"IPY_MODEL_0046f7bffaa6438d8cf31a0d558c4b46"
],
"layout": "IPY_MODEL_a780eb0b79ff40e9966a72a3c5a81eca"
}
},
"6b89100361a443bd85a01f8e47374c96": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"6bc78efbc98a4cb5ad04647f893e2f1a": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"6cba2abf076d483ba8847ccbcf537c52": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"6d23bd6b099746028b353af12380d226": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"6da928e744da4d0bb1cbb1df33c201ee": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"6db4cb7968194992819296b341c378f1": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"6e396b43955349b28a280ccb1d41c716": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_2e78f48b6298490885474f27bcbf2928",
"placeholder": "",
"style": "IPY_MODEL_501c6677916d40f9afaea8f48be244fe",
"value": " 422/422 [00:01<00:00, 300.05it/s]"
}
},
"6f2523ef7d0243b38975e5b657b43dd9": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"6f8967c159b1474ab706026b6becd9a1": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_017d16a6d32b4d6a9f868a57cd736568",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_6da928e744da4d0bb1cbb1df33c201ee",
"value": 422
}
},
"7048d397ed0647a19587c17deec944bb": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_a4823e3708aa4ccba43e3547a229095e",
"placeholder": "",
"style": "IPY_MODEL_32272e5a9e194bed9ffe5f16a3ae1d61",
"value": " 422/422 [00:01<00:00, 309.82it/s]"
}
},
"70537ee37fd645c8ba0d3e88fc43fdb5": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"70898e759e6945a1a479dcb2d434b50d": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"70bb47f183e24e7c805d8b5e89ce1b3e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_c2b02e00355041919382a3c23c0d6850",
"IPY_MODEL_6e396b43955349b28a280ccb1d41c716"
],
"layout": "IPY_MODEL_838598dc75fb4836b3f4d769d68de597"
}
},
"71522233b5b64b3dbd29ec69e47788d8": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"7326a18ab885419691c5bfc3666154bc": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"732824b0216e44f7a88da251febff435": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_b91537eaa9ba44f8952cb65696d236fe",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_40b7a89564684301ae357cae6c517be4",
"value": 422
}
},
"734031c5bb6e4e7d9e30f1a15861924f": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_8a0e36efabb04f869ba2c080446525da",
"placeholder": "",
"style": "IPY_MODEL_34237141f8854d33af9223e75e390ba5",
"value": " 422/422 [00:01<00:00, 304.14it/s]"
}
},
"7411b7336cab4e8f92347df274e93876": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"74632c71eef34180a7dba0c22fd9df80": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"759a4d04fe92425ab0d6d8681d3ed0c6": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"76c193b0729b4a57b1664a94163bb38a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"77325a33c7b6495e9301c771dbf935de": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"773cbd3194e142c694f73fd52ee6aa8c": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"79574d891d35493da77efbbad6fff11f": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_989fb7b65c504a1aad8ec0f53e2e43c3",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_3563f3d4033b4ac7b43279cf13d8bdd2",
"value": 422
}
},
"79c6ecc409764fc78294fcbd9319c3c0": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"7ae0b9e9e6964641a7d529b03d60b034": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_84fb3096f29f40bf9f4d20bca7608c05",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_05de1b15d6244f82bcef782c8f73f8a8",
"value": 422
}
},
"7aebabcd7cd546e989eb0c148ebb1348": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"7b5bf62263a64441b7424aab153415be": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_d1758fb53b404e3f9b8f317be75684b4",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_7eb7725598674b13b049a370bb505ea7",
"value": 422
}
},
"7e44024663ce4bd4833c4dcdf2d63d14": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"7e569ac0f5974f4aaed7cf2e953bc316": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"7e91135b95964e35a11bd312983f31c3": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"7eb7725598674b13b049a370bb505ea7": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"7ed4511c19b24c7fadf283d962a612a2": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_4b2f4fa05e7a465eacf809489243b2a8",
"IPY_MODEL_a3426fcb07904ca4be130a71b330c77a"
],
"layout": "IPY_MODEL_7e44024663ce4bd4833c4dcdf2d63d14"
}
},
"7f3517a0116846708d65a947e534de05": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_ca4f1e1dbbbd46618b40291f94ef1102",
"IPY_MODEL_3ab377ee24544e2a95433448a8331ed1"
],
"layout": "IPY_MODEL_2fa0fd59f8844a4e99eda19fc4151962"
}
},
"7f60da0694d343fca576f9cfaf7352ac": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_db3333497eba4ee1a63225b4c6bf468c",
"placeholder": "",
"style": "IPY_MODEL_ea6cbc2071af4c018f96e5846f328a1f",
"value": " 422/422 [00:01<00:00, 308.78it/s]"
}
},
"7fcc9f928ddd4f52a9d420862b9be6ef": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"80016285573540f998aeec24132c0bb3": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_265c8c05122c406fa6b603070e2ca7bd",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_ebfa4d5418d84b3ea8fad62dfabce7c1",
"value": 422
}
},
"80538e2ccd7f490987a79d4c70c9571b": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_d17516de5c52450abfd79af7db04a282",
"IPY_MODEL_584cadfb0a6c480f94aad4b9842c4d25"
],
"layout": "IPY_MODEL_50b4abff394846eabd6f6fdcac8a7762"
}
},
"81a8b3ff6d644f2c83f04a835bef5333": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"82076a20084549719b917b071d1d1b91": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"829ad0cbf2bb49d9bde04dcff1f9cb3c": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"838598dc75fb4836b3f4d769d68de597": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"8404870254bf4dfa80ce1a11acaa5969": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"847aaa75aecc40f3bc9d67c37cf03362": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"84ddb87806a94da8902e3da8a06aa7e5": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"84fb3096f29f40bf9f4d20bca7608c05": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"8584d1e513e7496192af75d2cf4f84c0": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_dbd113a86ad44f0b95261ed9f6e6c93c",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_74632c71eef34180a7dba0c22fd9df80",
"value": 422
}
},
"85e62f6e0211431a9f6513285ef1aa0b": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"8619ba70e84f4bda9d5c966863d0d4e3": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_977ab80b593849389b224d18aefb6814",
"IPY_MODEL_d62d3423df7f49d18b45bb87e9919ade"
],
"layout": "IPY_MODEL_0c037dcc8bbb4f38977b3e516d3a2014"
}
},
"861a02913c8346f2a2c7c7c049364acd": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"86c4306d45c04ce783d40f202c30c350": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_6f8967c159b1474ab706026b6becd9a1",
"IPY_MODEL_5c5c9d38d86c4d2486c547a3add4a04c"
],
"layout": "IPY_MODEL_2be93e1e76224f61afd058eea4583a05"
}
},
"86ea34c601f447f18eaaf30fd1711f77": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"877c9e4a3541425eac50c9250e5ff568": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"87a3fcd0fd8d4df1b885b1e106d085a8": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_7e91135b95964e35a11bd312983f31c3",
"placeholder": "",
"style": "IPY_MODEL_7411b7336cab4e8f92347df274e93876",
"value": " 422/422 [00:04<00:00, 102.91it/s]"
}
},
"880dce5bac5148988119a4c0f7296c83": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_c2fa2316330f4edc83a8e3e4de571cf3",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_6cba2abf076d483ba8847ccbcf537c52",
"value": 422
}
},
"88a24010610d4a19a3fb81eddfcfaf52": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"8a0e36efabb04f869ba2c080446525da": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"8a25115e9fef433e90b70e1ff3bbe341": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"8b296000cd0446e2952bff859b785260": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_26fa7101f97f4aed98a774c83e802bc9",
"IPY_MODEL_3a4fc2bfed8549d980a4884715d9c650"
],
"layout": "IPY_MODEL_f6f53ccede894d5fa859543bcd5d07a1"
}
},
"8b2e77ece9f14c37ad8efab38442acbb": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"8c10956546bd46cc94c322044b217a1a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_77325a33c7b6495e9301c771dbf935de",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_5edf3c8cd0f64046ac298d48a93ef32e",
"value": 422
}
},
"8c639957ced245318db4865da9e0c1df": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_7b5bf62263a64441b7424aab153415be",
"IPY_MODEL_370044cae56f42fd992e4c4ce3240f68"
],
"layout": "IPY_MODEL_0cdbaa847609462aa3163c49db61a7cd"
}
},
"8d73c0c46c944eb89e95a9efb0a891a6": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_2c303e0697ec4cca9e949207f89d7695",
"placeholder": "",
"style": "IPY_MODEL_cf424d3cddcb4e469f4c88d1df70dedb",
"value": " 422/422 [00:01<00:00, 311.63it/s]"
}
},
"8dde5fd1fe4347c7bea1c51c47b14e5e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"8f521dc0bd094a3da7cffd4da51ce7e0": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"8ff0e222da2b47d99e8931080555cb7e": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"9024d8c79164492e80170ca565f2e367": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"92ae4ede68904e2f8b0b2322f099d32b": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"943c772ee3fe43cea3d05cea3f7f12bb": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_f4fd9c138d8c4192afdb4324afe46771",
"placeholder": "",
"style": "IPY_MODEL_054a82c3f2dc4de9864b83dcd1a2abd8",
"value": " 422/422 [00:04<00:00, 101.82it/s]"
}
},
"95f5d01ae2524dd2bcfab4945816b6a4": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"96e832df4dec40d9a97bd129ef55effa": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"973c950b92d14614a5df848d2a8c024e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_637ac772584f4f169d59c0964be8acfd",
"IPY_MODEL_dd2ea7e5c59f4a3a807a7f8048eafb1e"
],
"layout": "IPY_MODEL_a702297c0c7e40b08546a7669dfb6917"
}
},
"977ab80b593849389b224d18aefb6814": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_a859b046e9e84f518b8c4aa449fb75cc",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_473ece24383049f6964082046f5a8cef",
"value": 422
}
},
"97e1cf826ec54af79646bee305e23601": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_1c2657c607454a0eb329ae8663665e82",
"IPY_MODEL_1c4cfdd4f52a4db182cd08357b3d3abe"
],
"layout": "IPY_MODEL_c8de4a1a7be449e68787a4dcdc94801d"
}
},
"987b752181564435ab5a7f8d838651f5": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"989fb7b65c504a1aad8ec0f53e2e43c3": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"995ac4803a534d0883a15702eae4fc50": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"99c3aedf530c44d69f3c145efc7da6da": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_9024d8c79164492e80170ca565f2e367",
"placeholder": "",
"style": "IPY_MODEL_199cccb400aa4fc59a1f95317db0ae2b",
"value": " 422/422 [00:01<00:00, 307.88it/s]"
}
},
"99fd455414304cb5a2a08077c2182452": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_449ba373a94641f2ac9078d8f98c3f1a",
"IPY_MODEL_fa591816c3414db3a7dc1478649ac230"
],
"layout": "IPY_MODEL_65729f9c7457426eb006da855388182b"
}
},
"9b16033c7b7a47568a0d078d708cf28d": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"9bc1cd208dfd4a55bd52a4d0f399117c": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"9bce5f414ec043509c06f10840cc2d0f": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_50ded217cb2440249cd86a4dcc0e1b7a",
"IPY_MODEL_e1229e07527b4a67bb7de71715ed10ca"
],
"layout": "IPY_MODEL_47fd69df905945a09183f89126c665f0"
}
},
"9bf763a02b58454ab1e4f9598939fd79": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_e0077b7268614d85b4a08ad07183a2e4",
"placeholder": "",
"style": "IPY_MODEL_407a6771394a4e69a504ea5b67164a64",
"value": " 422/422 [00:01<00:00, 302.85it/s]"
}
},
"9c7baaf83e734b86b2234c735dca859d": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"9e2a1ec730be42d38a3f04b967ed0b88": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"9f6277814410407fae19e3aee971c997": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"a08bba98a4994866949e19f6104e6991": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"a0b0dd366b424f03adde1e0698b8b31f": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"a13d290a9ddf41deb2f258c617dfd2e3": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"a16d1171acfe48b5abfd3652d0cf119f": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"a183401dedf140979852bfc28122d689": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"a21851e9b7b24098b3fbbe6d9f1f8e9c": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"a27c932db2c84a86a75a92af92a07f0b": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_8b2e77ece9f14c37ad8efab38442acbb",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_a183401dedf140979852bfc28122d689",
"value": 422
}
},
"a3426fcb07904ca4be130a71b330c77a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_8a25115e9fef433e90b70e1ff3bbe341",
"placeholder": "",
"style": "IPY_MODEL_759a4d04fe92425ab0d6d8681d3ed0c6",
"value": " 422/422 [00:04<00:00, 99.10it/s]"
}
},
"a4823e3708aa4ccba43e3547a229095e": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"a5ac2b15a8ae40ce92160a960414279c": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_43b0d557889248f287edd4edf50e3a9b",
"IPY_MODEL_99c3aedf530c44d69f3c145efc7da6da"
],
"layout": "IPY_MODEL_b52a071acc34438d849dbc7adcc91631"
}
},
"a63ede007e94496c884527478a9b0d17": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"a702297c0c7e40b08546a7669dfb6917": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"a73abf53972a4913bf2c50b0d341fc13": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"a780eb0b79ff40e9966a72a3c5a81eca": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"a859b046e9e84f518b8c4aa449fb75cc": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"a887772c5344437aa896b3ed1bd5208f": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_e53b1041df93415c9852da6d8a05b7d3",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_e5caa26da44e411f891a71b85c46c87a",
"value": 422
}
},
"a991cedbd4b24c139430a06369ad527f": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"aa1f805540a74c7380bd2b333b9ecc88": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"aa3e1f1cf64a47af9aa0930b855c14bc": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_27ca20cabb94467492f29713fad334dd",
"placeholder": "",
"style": "IPY_MODEL_550caabed61042e1b89dd8a826897b77",
"value": " 422/422 [00:01<00:00, 304.47it/s]"
}
},
"aa7c50235a9f43fa96a7d9ef7be20861": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"aa86d75f3dba49a481c2c15167387b99": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_49bbc050955c4ddd931efc9a468faccb",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_82076a20084549719b917b071d1d1b91",
"value": 422
}
},
"ab756a05d67241baabcb4ee84b35efe3": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_de806eb1cca0447c92917881ec18d49a",
"IPY_MODEL_00a9eda2268e47baaae96926bb7d8c51"
],
"layout": "IPY_MODEL_9b16033c7b7a47568a0d078d708cf28d"
}
},
"ab99a2cf8919454bb780cc24088f2b85": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_fddd7fb9ef3f4535a97775548abbabd1",
"IPY_MODEL_fa68877e48ab41a48a3c10dc3fe329d1"
],
"layout": "IPY_MODEL_9c7baaf83e734b86b2234c735dca859d"
}
},
"ac609c059aac45bba0ea7ee76e057977": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_318b1b7bb5064c5aabd39540bf2d7bf2",
"placeholder": "",
"style": "IPY_MODEL_fdd43b573d18496a9eec5f61632b33ed",
"value": " 422/422 [00:01<00:00, 315.84it/s]"
}
},
"ad1659c9381a4afca67aac81d1470776": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_191e3c3e02b44eda9ebe4859325830e5",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_f0c3740bcfb04fa6a12e1f1025aff42f",
"value": 422
}
},
"ad58dbfc6a294f3f9dc89c4a3f9658dc": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_0eea906841464f978a262640badc0fae",
"IPY_MODEL_11daf0a19032495ab15cdda96b0030e7"
],
"layout": "IPY_MODEL_8f521dc0bd094a3da7cffd4da51ce7e0"
}
},
"ae7ec72f5d064a6a80433613be03f849": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"af32974b6ed54e9da7b89fe35a749640": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"afe82cf54da249cb815dc3ece7bac260": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"b1548f9f9f4642ef814e9d3565002501": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"b31897c725c946e8a08a2ef0e80b6520": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"b32999bf8b8148e98bbbb6ba1acb4548": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"b34c1730161e41c5bb901128ccb4ff65": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"b3664609744546988f30152c49d02ac8": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_247a4869ae564aefa501053956083a6a",
"IPY_MODEL_5ac7bb8fbf0048c492fc4f6c8b69d828"
],
"layout": "IPY_MODEL_3e395e497d8f4f10812b2af65ccac13b"
}
},
"b3f89680663b466f9dda719fa6843707": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"b4979a909ae64f48888d2383285f4f68": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"b50ff7a73c0f4a6da93443ca019ba785": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_e2a09b4280db464f863a1ae66d58ea02",
"IPY_MODEL_7f60da0694d343fca576f9cfaf7352ac"
],
"layout": "IPY_MODEL_0a9b4f19291b403dadeca3938004561e"
}
},
"b52a071acc34438d849dbc7adcc91631": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"b668e892e9cb4571b173390e78d8d786": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_79574d891d35493da77efbbad6fff11f",
"IPY_MODEL_583d96a84b3642f4b864c7ae7ab6f598"
],
"layout": "IPY_MODEL_7e569ac0f5974f4aaed7cf2e953bc316"
}
},
"b7341f8df10a4e1ab8c3cbb475de0d82": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"b7740ddd81034150bca1bf841f43af31": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_583c6058061240838ad63f2a8ddf9a00",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_9e2a1ec730be42d38a3f04b967ed0b88",
"value": 422
}
},
"b775d1f8fbae434185d52dafd207b592": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"b826f8a9468d4c23b35f6021791885b6": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"b875c88f7ae74f8391ac58fc31a3aabc": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"b91537eaa9ba44f8952cb65696d236fe": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"b9862082c1cf47d1bb6057537571499d": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"b9db25806c4a410db0004f77a1acaee5": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"bce5275ceea044c3ba0f1635b0657ac4": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_1017871f46b44c9cb3f976d1af607ffc",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_caff75ab1b874f9da109674579faadf3",
"value": 422
}
},
"c0db04b51d0c4e9e9081c0169cca1f4e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_80016285573540f998aeec24132c0bb3",
"IPY_MODEL_f999eff152854b6da98defded43c8818"
],
"layout": "IPY_MODEL_cb35834c71e24301abd3574d82e017b9"
}
},
"c1a8a841a5124a6eb364863a2fc11447": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"c2b02e00355041919382a3c23c0d6850": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_1480b195483d454c844c9e6a724b02b3",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_9f6277814410407fae19e3aee971c997",
"value": 422
}
},
"c2fa2316330f4edc83a8e3e4de571cf3": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"c330191809e44832a89abcc6c4a6fd5d": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_110f5508e00e4ec6b5c84877abc9c265",
"placeholder": "",
"style": "IPY_MODEL_e731309213c94e92be1a389d13a190f5",
"value": " 422/422 [00:01<00:00, 314.36it/s]"
}
},
"c5b2f89f03264317aa734883c164cabb": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_b31897c725c946e8a08a2ef0e80b6520",
"placeholder": "",
"style": "IPY_MODEL_79c6ecc409764fc78294fcbd9319c3c0",
"value": " 422/422 [00:01<00:00, 305.91it/s]"
}
},
"c808822cdef941a3879bf1cc2b32720a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_570de138283348be9b7575b51df33eea",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_c8967d63161647d4a9e778765d3e7ac2",
"value": 422
}
},
"c8967d63161647d4a9e778765d3e7ac2": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"c8de4a1a7be449e68787a4dcdc94801d": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"c918030a689a4842b727d9b7ecbe39ae": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"c9892de0df614a84b8ba7a6e259ffe09": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_86ea34c601f447f18eaaf30fd1711f77",
"placeholder": "",
"style": "IPY_MODEL_fac6ce9bd41f43f9821f45741359f5f4",
"value": " 422/422 [00:04<00:00, 101.74it/s]"
}
},
"c99185c8dfbe47aab9e29a90b92c63a0": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_1687134f713c4c039365c0a66d1aab82",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_0829771aa327491e8b7d05ef2b084175",
"value": 422
}
},
"ca1968d9a9f7462e88c9a051cb7a6777": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"ca4f1e1dbbbd46618b40291f94ef1102": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_b1548f9f9f4642ef814e9d3565002501",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_5fe58d7f655243c8b9d29f31a0eb7b53",
"value": 422
}
},
"caff75ab1b874f9da109674579faadf3": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"cb24ce5968194d92bf77463a59eea46b": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_aa86d75f3dba49a481c2c15167387b99",
"IPY_MODEL_e83ab25fcda94a90bdd58143c3624263"
],
"layout": "IPY_MODEL_5bfe581cc5db4002b38c89df6b245720"
}
},
"cb35834c71e24301abd3574d82e017b9": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"cc7f3e43ff504effb6fcbd63bac5546f": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"cd54b80e170c4a6c94fe419c2cf8bcd6": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_4c60aa21f05a4d678b2135d4d1f2c99a",
"placeholder": "",
"style": "IPY_MODEL_81a8b3ff6d644f2c83f04a835bef5333",
"value": " 422/422 [00:04<00:00, 36.83it/s]"
}
},
"ce87dfd22b554e0a83b264913f7156c6": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"ceefe7af1738490d9414c6c690eb4681": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"cf424d3cddcb4e469f4c88d1df70dedb": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"cfa1762b9dd740f39d4dc3f198abdcc6": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_2eb23201ad354f3ba8a0720757d4d8bd",
"placeholder": "",
"style": "IPY_MODEL_ce87dfd22b554e0a83b264913f7156c6",
"value": " 422/422 [00:01<00:00, 303.83it/s]"
}
},
"d17516de5c52450abfd79af7db04a282": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_3c80e2934eda4d19825a76c6400a3823",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_22a455a7b3f841a9b01bc83dee7717cc",
"value": 422
}
},
"d1758fb53b404e3f9b8f317be75684b4": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"d23105e4cc4141b5937063dc03e9d565": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"d266fcfaaa9741b1a4be560f094ac1d2": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_71522233b5b64b3dbd29ec69e47788d8",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_db2b8e98efcd4a7599b7bbd57c55e70c",
"value": 422
}
},
"d4f5f106a2ed488fbba2b626bbd72714": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_ebebf706cc9549d9803b796ea2ef2853",
"placeholder": "",
"style": "IPY_MODEL_b9862082c1cf47d1bb6057537571499d",
"value": " 422/422 [00:01<00:00, 312.14it/s]"
}
},
"d62d3423df7f49d18b45bb87e9919ade": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_b3f89680663b466f9dda719fa6843707",
"placeholder": "",
"style": "IPY_MODEL_50d48e02aa744ad8adbccce94148b060",
"value": " 422/422 [00:01<00:00, 298.70it/s]"
}
},
"d64df8ba0ab04aad9e1c2c4551a0b7d9": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"d673cd4b0e384aee988f565ab73cb022": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"d6f5cf60a40c4825bfca5a960056d078": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"d74548d2826c45cba4e730b3a9638fd4": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"d7d3d6ec8a144738b61b59e8e1d37f1e": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"d904c7aebabb477a99f41e95f1b0b9cf": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"d98b018eb67447eca644cd5fa56f2723": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_a887772c5344437aa896b3ed1bd5208f",
"IPY_MODEL_734031c5bb6e4e7d9e30f1a15861924f"
],
"layout": "IPY_MODEL_8ff0e222da2b47d99e8931080555cb7e"
}
},
"d9ba3c88398d4e12a674eca10a8633ff": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_d266fcfaaa9741b1a4be560f094ac1d2",
"IPY_MODEL_cd54b80e170c4a6c94fe419c2cf8bcd6"
],
"layout": "IPY_MODEL_2637352462d94fe795a409fbbbe70926"
}
},
"db1b1bff50974cd0823dcccecd2ae1c4": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_e47127013eaa48979dc86657839ba227",
"IPY_MODEL_eb6a91e79a1b4998bc2751d3e572b05f"
],
"layout": "IPY_MODEL_fad451dd3ae040a8aede364098400642"
}
},
"db2b8e98efcd4a7599b7bbd57c55e70c": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"db3333497eba4ee1a63225b4c6bf468c": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"db4cab1a1150482c861e22d6e7b2be09": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"dba0b00af12d410cb8dfd4be838b173a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_bce5275ceea044c3ba0f1635b0657ac4",
"IPY_MODEL_7048d397ed0647a19587c17deec944bb"
],
"layout": "IPY_MODEL_b34c1730161e41c5bb901128ccb4ff65"
}
},
"dbd113a86ad44f0b95261ed9f6e6c93c": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"dc4f419827134de0b96b9b020fa8a788": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"dd2ea7e5c59f4a3a807a7f8048eafb1e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_69f5f6ef84e44fc284dd9be06beaaf65",
"placeholder": "",
"style": "IPY_MODEL_608cfc45e4ec4579b75999043e62ab19",
"value": " 422/422 [00:01<00:00, 301.40it/s]"
}
},
"dd43258264da429a9aeb315f1715b7db": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_4bccad99024f4d77a23b657d9dc7746d",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_861a02913c8346f2a2c7c7c049364acd",
"value": 422
}
},
"dd52397ee21b49f999121fb8a62ea4bf": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"de11572a1baa4efabaa5c1436af92f06": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"de806eb1cca0447c92917881ec18d49a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_e9c21ca7465449d3a8930234e9ed8dcf",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_64cc1e2f91a44b218a7824aedd373b31",
"value": 422
}
},
"df732c32eb1340a890c181eed7358452": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"df901f8348654af087a63b43ec822b9e": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_3fb34090b5b34d6fa8f039440e839fbd",
"IPY_MODEL_46796020aa1145fc87c7f458ee41e14e"
],
"layout": "IPY_MODEL_dffdd7d2ddf24707a1b2fb0df00cf957"
}
},
"dffdd7d2ddf24707a1b2fb0df00cf957": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"e0077b7268614d85b4a08ad07183a2e4": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"e1229e07527b4a67bb7de71715ed10ca": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_6bc78efbc98a4cb5ad04647f893e2f1a",
"placeholder": "",
"style": "IPY_MODEL_5ee467d84bd24e5db0507a994273ea28",
"value": " 422/422 [00:01<00:00, 213.42it/s]"
}
},
"e18cf1aef5834391ae7410dcb90f1c59": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"e28f8422f22b4ef29ec9f6cffe959ecd": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_dd43258264da429a9aeb315f1715b7db",
"IPY_MODEL_43d68f9905354442bad801fb61bfd02a"
],
"layout": "IPY_MODEL_03edea4af2724453bfbb6b8e2e74cda2"
}
},
"e2a09b4280db464f863a1ae66d58ea02": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_5d84a716b7ed4ad8927ee8ea8493e4db",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_5ac87cfe946d483b8088df90b22490d8",
"value": 422
}
},
"e32b04ebb4454ca2848b799045c30320": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"e3ddfbf3678a41ab9926976cf1beee85": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_7ae0b9e9e6964641a7d529b03d60b034",
"IPY_MODEL_4a9b8833512a4f07a4411b1e8c2ca0c8"
],
"layout": "IPY_MODEL_a16d1171acfe48b5abfd3652d0cf119f"
}
},
"e47127013eaa48979dc86657839ba227": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_ff6267b3dd63479a8e260848a30709a8",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_0b62d25712a2403ab488914a7109ab47",
"value": 422
}
},
"e4e73b9dcadd4bacb50d03152e7d81e8": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_6485d6b9b194425f8eb28b779d7d6e9a",
"placeholder": "",
"style": "IPY_MODEL_6b89100361a443bd85a01f8e47374c96",
"value": " 422/422 [00:04<00:00, 95.15it/s]"
}
},
"e53b1041df93415c9852da6d8a05b7d3": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"e567c933e82146758cb690a190f1199c": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"e5caa26da44e411f891a71b85c46c87a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"e6b70306bfea4bd09f17eac179387a96": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"e731309213c94e92be1a389d13a190f5": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"e82d78f382b343c384bb7895daa18c82": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"e83ab25fcda94a90bdd58143c3624263": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_5eb4571729ea4b92857d3ba3c575721a",
"placeholder": "",
"style": "IPY_MODEL_ae7ec72f5d064a6a80433613be03f849",
"value": " 422/422 [00:01<00:00, 294.05it/s]"
}
},
"e9228023d62243569a1e2e06a1cf94b5": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_c808822cdef941a3879bf1cc2b32720a",
"IPY_MODEL_e4e73b9dcadd4bacb50d03152e7d81e8"
],
"layout": "IPY_MODEL_f08fd096dcb743faafc6dbc604b45989"
}
},
"e9c21ca7465449d3a8930234e9ed8dcf": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"ea6cbc2071af4c018f96e5846f328a1f": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"eaa7c96f7ee849948f10033289dfbd05": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"eac2608b4c0e4ad9ac81838ad7d68657": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"eb2cfbd11882434e9373f36d62d06258": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"eb6a91e79a1b4998bc2751d3e572b05f": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_8404870254bf4dfa80ce1a11acaa5969",
"placeholder": "",
"style": "IPY_MODEL_dd52397ee21b49f999121fb8a62ea4bf",
"value": " 422/422 [00:04<00:00, 98.38it/s]"
}
},
"ebebf706cc9549d9803b796ea2ef2853": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"ebfa4d5418d84b3ea8fad62dfabce7c1": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"ee11cbb4ec3d4042b6c62536a477c10a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"ef3638ddf37547fe8359b8f4333dc2e8": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"efb84c9861764c61b09ad02f170d8530": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"f08fd096dcb743faafc6dbc604b45989": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"f0c3740bcfb04fa6a12e1f1025aff42f": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"f160728d6cfc4cd380fafce5b253d2c7": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_0b2d03b2d0f6447293576ff8e9ed9ea3",
"IPY_MODEL_f839ef53446f4fa9834edc8da44f1327"
],
"layout": "IPY_MODEL_b9db25806c4a410db0004f77a1acaee5"
}
},
"f3abedf22ff649b4b90e85fc68f93c67": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_e82d78f382b343c384bb7895daa18c82",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_d904c7aebabb477a99f41e95f1b0b9cf",
"value": 422
}
},
"f4fd9c138d8c4192afdb4324afe46771": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"f55d98fcc3524d94ba73b3192ee19281": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"f5f4c5a71a9c41f88fafc9abea500f14": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_eac2608b4c0e4ad9ac81838ad7d68657",
"placeholder": "",
"style": "IPY_MODEL_c918030a689a4842b727d9b7ecbe39ae",
"value": " 422/422 [00:01<00:00, 313.61it/s]"
}
},
"f6f53ccede894d5fa859543bcd5d07a1": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"f7dd4ce672014ce6ba3d7d50ff090c9e": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"f82fd9ccee6b423c8bdbc830ba0f8a93": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"f839ef53446f4fa9834edc8da44f1327": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_0459c93679e54f9db4e7009420b8a612",
"placeholder": "",
"style": "IPY_MODEL_68b365d2d435430aae5af88ecdae39a5",
"value": " 422/422 [00:01<00:00, 292.61it/s]"
}
},
"f8c7cd8112274e26996a56ecae42ec3f": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"f999eff152854b6da98defded43c8818": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_92ae4ede68904e2f8b0b2322f099d32b",
"placeholder": "",
"style": "IPY_MODEL_85e62f6e0211431a9f6513285ef1aa0b",
"value": " 422/422 [00:01<00:00, 309.42it/s]"
}
},
"fa591816c3414db3a7dc1478649ac230": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_829ad0cbf2bb49d9bde04dcff1f9cb3c",
"placeholder": "",
"style": "IPY_MODEL_d673cd4b0e384aee988f565ab73cb022",
"value": " 422/422 [00:01<00:00, 311.00it/s]"
}
},
"fa68877e48ab41a48a3c10dc3fe329d1": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_3e448a3c76bc4fdfa57f82327411f2e2",
"placeholder": "",
"style": "IPY_MODEL_96e832df4dec40d9a97bd129ef55effa",
"value": " 422/422 [00:01<00:00, 317.23it/s]"
}
},
"faa57a96c1e7416fa9a9a41b5bc6e308": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_732824b0216e44f7a88da251febff435",
"IPY_MODEL_c5b2f89f03264317aa734883c164cabb"
],
"layout": "IPY_MODEL_b4979a909ae64f48888d2383285f4f68"
}
},
"fac6ce9bd41f43f9821f45741359f5f4": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"fad451dd3ae040a8aede364098400642": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"fc6d6eef364446c3b54b68c5c4c1cb1b": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_1de0cf05f59b49eb8f8d233b9a4e490e",
"IPY_MODEL_9bf763a02b58454ab1e4f9598939fd79"
],
"layout": "IPY_MODEL_662835d152984f4eb87fa7cef0c0e28d"
}
},
"fce1dcea8fd749569b29e353a086ad45": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"fdd43b573d18496a9eec5f61632b33ed": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"fdd481739b6a4d64ac739965c621d3b8": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"fddd7fb9ef3f4535a97775548abbabd1": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "100%",
"description_tooltip": null,
"layout": "IPY_MODEL_3f0d72d9c4984f89a82862fcdd24643b",
"max": 422,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_32ff8a0bfbcb44ebb0c80363f6115e07",
"value": 422
}
},
"fe0223b47b9346e7ad056b86841f91d9": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": "initial"
}
},
"ff6267b3dd63479a8e260848a30709a8": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
}
}
}
},
"nbformat": 4,
"nbformat_minor": 1
}
thinc-release-v9.1.1/examples/01_intro_model_definition_methods.ipynb 0000664 0000000 0000000 00000031435 14670643317 0026103 0 ustar 00root root 0000000 0000000 {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Intro to Thinc's `Model` class, model definition and methods\n",
"\n",
"Thinc follows a functional-programming approach to model definition. Its approach is especially effective for **complicated network architectures**, and use cases where different data types need to be passed through the network to reach specific subcomponents. This notebook shows how to compose Thinc models and how to use the `Model` class and its methods."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install \"thinc>=8.0.0\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Thinc provides a variety of [layers](https://thinc.ai/docs/api-layers), functions that create `Model` instances. Thinc tries to avoid inheritance, preferring function composition. The `Linear` function gives you a model that computes `Y = X @ W.T + b` (the function is defined in `thinc.layers.linear.forward`)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy\n",
"from thinc.api import Linear, zero_init\n",
"\n",
"n_in = numpy.zeros((128, 16), dtype=\"f\")\n",
"n_out = numpy.zeros((128, 10), dtype=\"f\")\n",
"\n",
"model = Linear(nI=n_in.shape[1], nO=n_out.shape[1], init_W=zero_init)\n",
"nI = model.get_dim(\"nI\")\n",
"nO = model.get_dim(\"nO\")\n",
"print(f\"Initialized model with input dimension nI={nI} and output dimension nO={nO}.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Models support **dimension inference from data**. You can defer some or all of the dimensions."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = Linear(init_W=zero_init)\n",
"print(f\"Initialized model with no input/output dimensions.\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X = numpy.zeros((128, 16), dtype=\"f\")\n",
"Y = numpy.zeros((128, 10), dtype=\"f\")\n",
"model.initialize(X=X, Y=Y)\n",
"nI = model.get_dim(\"nI\")\n",
"nO = model.get_dim(\"nO\")\n",
"print(f\"Initialized model with input dimension nI={nI} and output dimension nO={nO}.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `chain` function wires two model instances together, with a feed-forward relationship. Dimension inference is especially helpful here."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import chain, glorot_uniform_init\n",
"\n",
"n_hidden = 128\n",
"X = numpy.zeros((128, 16), dtype=\"f\")\n",
"Y = numpy.zeros((128, 10), dtype=\"f\")\n",
"\n",
"model = chain(Linear(n_hidden, init_W=glorot_uniform_init), Linear(init_W=zero_init),)\n",
"model.initialize(X=X, Y=Y)\n",
"nI = model.get_dim(\"nI\")\n",
"nO = model.get_dim(\"nO\")\n",
"nO_hidden = model.layers[0].get_dim(\"nO\")\n",
"print(f\"Initialized model with input dimension nI={nI} and output dimension nO={nO}.\")\n",
"print(f\"The size of the hidden layer is {nO_hidden}.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We call functions like `chain` [**combinators**](https://thinc.ai/docs/api-layers#combinators). Combinators take one or more models as arguments, and return another model instance, without introducing any new weight parameters. Another useful combinator is `concatenate`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import concatenate\n",
"\n",
"model = concatenate(Linear(n_hidden), Linear(n_hidden))\n",
"model.initialize(X=X)\n",
"nO = model.get_dim(\"nO\")\n",
"print(f\"Initialized model with output dimension nO={nO}.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `concatenate` function produces a layer that **runs the child layers separately**, and then **concatenates their outputs together**. This is often useful for combining features from different sources. For instance, we use this all the time to build [spaCy](https://spacy.io)'s embedding layers.\n",
"\n",
"Some combinators work on a layer and a numeric argument. For instance, the `clone` combinator creates a number of copies of a layer, and chains them together into a deep feed-forward network. The shape inference is especially handy here: we want the first and last layers to have different shapes, so we can avoid providing any dimensions into the layer we clone. We then just have to specify the first layer's output size, and we can let the rest of the dimensions be inferred from the data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import clone\n",
"\n",
"model = clone(Linear(), 5)\n",
"model.layers[0].set_dim(\"nO\", n_hidden)\n",
"model.initialize(X=X, Y=Y)\n",
"nI = model.get_dim(\"nI\")\n",
"nO = model.get_dim(\"nO\")\n",
"print(f\"Initialized model with input dimension nI={nI} and output dimension nO={nO}.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can apply `clone` to model instances that have child layers, making it easy to define more complex architectures. For instance, we often want to attach an activation function and dropout to a linear layer, and then repeat that substructure a number of times. Of course, you can make whatever intermediate functions you find helpful."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import Relu, Dropout\n",
"\n",
"def Hidden(dropout=0.2):\n",
" return chain(Linear(), Relu(), Dropout(dropout))\n",
"\n",
"model = clone(Hidden(0.2), 5)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Some combinators are unary functions: they take only one model. These are usually **input and output transformations**. For instance, the `with_array` combinator produces a model that flattens lists of arrays into a single array, and then calls the child layer to get the flattened output. It then reverses the transformation on the output."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import with_array\n",
"\n",
"model = with_array(Linear(4, 2))\n",
"Xs = [model.ops.alloc2f(10, 2, dtype=\"f\")]\n",
"model.initialize(X=Xs)\n",
"Ys = model.predict(Xs)\n",
"print(f\"Prediction shape: {Ys[0].shape}.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The combinator system makes it easy to wire together complex models very concisely. A concise notation is a huge advantage, because it lets you read and review your model with less clutter – making it easy to spot mistakes, and easy to make changes. For the ultimate in concise notation, you can also take advantage of Thinc's **operator overloading**, which lets you use an infix notation. Operator overloading can lead to unexpected results, so you have to enable the overloading explicitly **in a contextmanager**. This also lets you control how the operators are bound, making it easy to use the feature with your own combinators. For instance, here is a definition for a text classification network:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import add, chain, concatenate, clone\n",
"from thinc.api import with_array, reduce_max, reduce_mean, residual\n",
"from thinc.api import Model, Embed, Maxout, Softmax\n",
"\n",
"nH = 5\n",
"\n",
"with Model.define_operators({\">>\": chain, \"|\": concatenate, \"+\": add, \"**\": clone}):\n",
" model = (\n",
" with_array(\n",
" (Embed(128, column=0) + Embed(64, column=1))\n",
" >> Maxout(nH, normalize=True, dropout=0.2)\n",
" )\n",
" >> (reduce_max() | reduce_mean())\n",
" >> residual(Relu() >> Dropout(0.2)) ** 2\n",
" >> Softmax()\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The network above will expect a list of arrays as input, where each array should have two columns with different numeric identifier features. The two features will be embedded using separate embedding tables, and the two vectors added and passed through a `Maxout` layer with layer normalization and dropout. The sequences then pass through two pooling functions, and the concatenated results are passed through 2 `Relu` layers with dropout and residual connections. Finally, the sequence vectors are passed through an output layer, which has a `Softmax` activation."
]
},
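  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For comparison, here is the same network written as a sketch with the explicit combinator functions instead of the overloaded operators (`>>` is `chain`, `|` is `concatenate`, `+` is `add` and `**` is `clone`; `model_explicit` is just an illustrative name):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model_explicit = chain(\n",
    "    with_array(\n",
    "        chain(\n",
    "            add(Embed(128, column=0), Embed(64, column=1)),\n",
    "            Maxout(nH, normalize=True, dropout=0.2),\n",
    "        )\n",
    "    ),\n",
    "    concatenate(reduce_max(), reduce_mean()),\n",
    "    clone(residual(chain(Relu(), Dropout(0.2))), 2),\n",
    "    Softmax(),\n",
    ")"
   ]
  },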
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"\n",
"## Using a model\n",
"\n",
"Define the model:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import Linear, Adam\n",
"import numpy\n",
"\n",
"X = numpy.zeros((128, 10), dtype=\"f\")\n",
"dY = numpy.zeros((128, 10), dtype=\"f\")\n",
"\n",
"model = Linear(10, 10)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Initialize the model with a sample of the data:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.initialize(X=X, Y=dY)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Run the model over some data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y = model.predict(X)\n",
"Y"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Get a callback to backpropagate:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Y, backprop = model.begin_update(X)\n",
"Y, backprop"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Run the callback to calculate the gradient with respect to the inputs. If the model has trainable parameters, gradients for the parameters are accumulated internally, as a side-effect."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dX = backprop(dY)\n",
"dX"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `backprop()` callback only increments the parameter gradients, it doesn't actually change the weights. To increment the weights, call `model.finish_update()`, passing it an optimizer:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"optimizer = Adam()\n",
"model.finish_update(optimizer)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can get and set dimensions, parameters and attributes by name:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dim = model.get_dim(\"nO\")\n",
"W = model.get_param(\"W\")\n",
"model.attrs[\"hello\"] = \"world\"\n",
"model.attrs.get(\"foo\", \"bar\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can also retrieve parameter gradients, and increment them explicitly:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dW = model.get_grad(\"W\")\n",
"model.inc_grad(\"W\", dW * 0.1)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, you can serialize models using the `model.to_bytes` and `model.to_disk` methods, and load them back with `from_bytes` and `from_disk`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_bytes = model.to_bytes()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
thinc-release-v9.1.1/examples/02_transformers_tagger_bert.ipynb 0000664 0000000 0000000 00000052112 14670643317 0024723 0 ustar 00root root 0000000 0000000 {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Training a part-of-speech tagger with transformers (BERT)\n",
"\n",
"This example shows how to use Thinc and Hugging Face's [`transformers`](https://github.com/huggingface/transformers) library to implement and train a part-of-speech tagger on the Universal Dependencies [AnCora corpus](https://github.com/UniversalDependencies/UD_Spanish-AnCora). This notebook assumes familiarity with machine learning concepts, transformer models and Thinc's config system and `Model` API (see the \"Thinc for beginners\" notebook and the [documentation](https://thinc.ai/docs) for more info)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"!pip install \"thinc>=8.0.0\" transformers torch \"ml_datasets>=0.2.0\" \"tqdm>=4.41\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"First, let's use Thinc's `prefer_gpu` helper to make sure we're performing operations **on GPU if available**. The function should be called right after importing Thinc, and it returns a boolean indicating whether the GPU has been activated. If we're on GPU, we can also call `use_pytorch_for_gpu_memory` to route `cupy`'s memory allocation via PyTorch, so both can play together nicely."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import prefer_gpu, use_pytorch_for_gpu_memory\n",
"\n",
"is_gpu = prefer_gpu()\n",
"print(\"GPU:\", is_gpu)\n",
"if is_gpu:\n",
" use_pytorch_for_gpu_memory()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Overview: the final config\n",
"\n",
"Here's the final config for the model we're building in this notebook. It references a custom `TransformersTagger` that takes the name of a starter (the pretrained model to use), an optimizer, a learning rate schedule with warm-up and the general training settings. You can keep the config string within your file or notebook, or save it to a `conig.cfg` file and load it in via `Config.from_disk`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"CONFIG = \"\"\"\n",
"[model]\n",
"@layers = \"TransformersTagger.v1\"\n",
"starter = \"bert-base-multilingual-cased\"\n",
"\n",
"[optimizer]\n",
"@optimizers = \"Adam.v1\"\n",
"\n",
"[optimizer.learn_rate]\n",
"@schedules = \"warmup_linear.v1\"\n",
"initial_rate = 0.01\n",
"warmup_steps = 3000\n",
"total_steps = 6000\n",
"\n",
"[loss]\n",
"@losses = \"SequenceCategoricalCrossentropy.v1\"\n",
"\n",
"[training]\n",
"batch_size = 128\n",
"words_per_subbatch = 2000\n",
"n_epoch = 10\n",
"\"\"\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"\n",
"## Defining the model\n",
"\n",
"The Thinc model we want to define should consist of 3 components: the transformers **tokenizer**, the actual **transformer** implemented in PyTorch and a **softmax-activated output layer**.\n",
"\n",
"\n",
"### 1. Wrapping the tokenizer\n",
"\n",
"To make it easier to keep track of the data that's passed around (and get type errors if something goes wrong), we first create a `TokensPlus` dataclass that holds the information we need from the `transformers` tokenizer. The most important work we'll do in this class is to build an _alignment map_. The transformer models are trained on input sequences that over-segment the sentence, so that they can work on smaller vocabularies. These over-segmentations are generally called \"word pieces\". The transformer will return a tensor with one vector per wordpiece. We need to map that to a tensor with one vector per POS-tagged token. We'll pass those token representations into a feed-forward network to predict the tag probabilities. During the backward pass, we'll then need to invert this mapping, so that we can calculate the gradients with respect to the wordpieces given the gradients with respect to the tokens. To keep things relatively simple, we'll store the alignment as a list of arrays, with each array mapping one token to one wordpiece vector (its first one). To make this work, we'll need to run the tokenizer with `is_split_into_words=True`, which should ensure that we get at least one wordpiece per token."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import Optional, List\n",
"import numpy\n",
"from thinc.types import Ints1d, Floats2d\n",
"from dataclasses import dataclass\n",
"import torch\n",
"from transformers import BatchEncoding, TokenSpan\n",
"\n",
"\n",
"@dataclass\n",
"class TokensPlus:\n",
" batch_size: int\n",
" tok2wp: List[Ints1d]\n",
" input_ids: torch.Tensor\n",
" token_type_ids: torch.Tensor\n",
" attention_mask: torch.Tensor\n",
" \n",
" def __init__(self, inputs: List[List[str]], wordpieces: BatchEncoding):\n",
" self.input_ids = wordpieces[\"input_ids\"]\n",
" self.attention_mask = wordpieces[\"attention_mask\"]\n",
" self.token_type_ids = wordpieces[\"token_type_ids\"]\n",
" self.batch_size = self.input_ids.shape[0]\n",
" self.tok2wp = []\n",
" for i in range(self.batch_size):\n",
" spans = [wordpieces.word_to_tokens(i, j) for j in range(len(inputs[i]))]\n",
" self.tok2wp.append(self.get_wp_starts(spans))\n",
" \n",
" def get_wp_starts(self, spans: List[Optional[TokenSpan]]) -> Ints1d:\n",
" \"\"\"Calculate an alignment mapping each token index to its first wordpiece.\"\"\"\n",
" alignment = numpy.zeros((len(spans)), dtype=\"i\")\n",
" for i, span in enumerate(spans):\n",
" if span is None:\n",
" raise ValueError(\n",
" \"Token did not align to any wordpieces. Was the tokenizer \"\n",
" \"run with is_split_into_words=True?\"\n",
" )\n",
" else:\n",
" alignment[i] = span.start\n",
" return alignment\n",
" \n",
"\n",
"def test_tokens_plus(name: str=\"bert-base-multilingual-cased\"):\n",
" from transformers import AutoTokenizer\n",
" inputs = [\n",
" [\"Our\", \"band\", \"is\", \"called\", \"worlthatmustbedivided\", \"!\"],\n",
" [\"We\", \"rock\", \"!\"]\n",
" ]\n",
" tokenizer = AutoTokenizer.from_pretrained(name)\n",
" wordpieces = tokenizer(\n",
" inputs,\n",
" is_split_into_words=True,\n",
" add_special_tokens=True,\n",
" return_token_type_ids=True,\n",
" return_attention_mask=True,\n",
" return_length=True,\n",
" return_tensors=\"pt\",\n",
" padding=\"longest\"\n",
" )\n",
" tplus = TokensPlus(inputs, wordpieces)\n",
" assert len(tplus.tok2wp) == len(inputs) == len(tplus.input_ids)\n",
" for i, align in enumerate(tplus.tok2wp):\n",
" assert len(align) == len(inputs[i])\n",
" for j in align:\n",
" assert j >= 0 and j < tplus.input_ids.shape[1]\n",
" \n",
"test_tokens_plus()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The wrapped tokenizer will take a list-of-lists as input (the texts) and will output a `TokensPlus` object containing the fully padded batch of tokens. The wrapped transformer will take a list of `TokensPlus` objects and will output a list of 2-dimensional arrays.\n",
"\n",
"1. **TransformersTokenizer**: `List[List[str]]` → `TokensPlus`\n",
"2. **Transformer**: `TokensPlus` → `List[Array2d]`\n",
"\n",
"> 💡 Since we're adding type hints everywhere (and Thinc is fully typed, too), you can run your code through [`mypy`](https://mypy.readthedocs.io/en/stable/) to find type errors and inconsistencies. If you're using an editor like Visual Studio Code, you can enable `mypy` linting and type errors will be highlighted in real time as you write code.\n",
"\n",
"To use the tokenizer as a layer in our network, we register a new function that returns a Thinc `Model`. The function takes the name of the pretrained weights (e.g. `\"bert-base-multilingual-cased\"`) as an argument that can later be provided via the config. After loading the `AutoTokenizer`, we can stash it in the attributes. This lets us access it at any point later on via `model.attrs[\"tokenizer\"]`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import thinc\n",
"from thinc.api import Model\n",
"from transformers import AutoTokenizer\n",
"\n",
"@thinc.registry.layers(\"transformers_tokenizer.v1\")\n",
"def TransformersTokenizer(name: str) -> Model[List[List[str]], TokensPlus]:\n",
" def forward(model, inputs: List[List[str]], is_train: bool):\n",
" tokenizer = model.attrs[\"tokenizer\"]\n",
" wordpieces = tokenizer(\n",
" inputs,\n",
" is_split_into_words=True,\n",
" add_special_tokens=True,\n",
" return_token_type_ids=True,\n",
" return_attention_mask=True,\n",
" return_length=True,\n",
" return_tensors=\"pt\",\n",
" padding=\"longest\"\n",
" )\n",
" return TokensPlus(inputs, wordpieces), lambda d_tokens: []\n",
"\n",
" return Model(\"tokenizer\", forward, attrs={\"tokenizer\": AutoTokenizer.from_pretrained(name)})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The forward pass takes the model and a list-of-lists of strings and outputs the `TokensPlus` dataclass. It also outputs a dummy callback function, to meet the API contract for Thinc models. Even though there's no way we can meaningfully \"backpropagate\" this layer, we need to make sure the function has the right signature, so that it can be used interchangeably with other layers."
]
},
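  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a quick sanity check, here is a minimal usage sketch (it assumes the pretrained tokenizer can be downloaded in your environment; `tokenizer_layer` and `tplus` are just illustrative names):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenizer_layer = TransformersTokenizer(\"bert-base-multilingual-cased\")\n",
    "tplus, backprop = tokenizer_layer([[\"We\", \"rock\", \"!\"]], is_train=False)\n",
    "print(tplus.batch_size, tplus.input_ids.shape, tplus.tok2wp)"
   ]
  },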
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 2. Wrapping the transformer\n",
"\n",
"To load and wrap the transformer, we can use `transformers.AutoModel` and Thinc's `PyTorchWrapper`. The forward method of the wrapped model can take arbitrary positional arguments and keyword arguments. Here's what the wrapped model is going to look like:\n",
"\n",
"```python\n",
"@thinc.registry.layers(\"transformers_model.v1\")\n",
"def Transformer(name) -> Model[TokensPlus, List[Floats2d]]:\n",
" return PyTorchWrapper(\n",
" AutoModel.from_pretrained(name),\n",
" convert_inputs=convert_transformer_inputs,\n",
" convert_outputs=convert_transformer_outputs,\n",
" )\n",
"```\n",
"\n",
"The `Transformer` layer takes our `TokensPlus` dataclass as input and outputs a list of 2-dimensional arrays. The convert functions are used to **map inputs and outputs to and from the PyTorch model**. Each function should return the converted output, and a callback to use during the backward pass. To make the arbitrary positional and keyword arguments easier to manage, Thinc uses an `ArgsKwargs` dataclass, essentially a named tuple with `args` and `kwargs` that can be spread into a function as `*ArgsKwargs.args` and `**ArgsKwargs.kwargs`. The `ArgsKwargs` objects will be passed straight into the model in the forward pass, and straight into `torch.autograd.backward` during the backward pass."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import List, Tuple, Callable\n",
"from thinc.api import ArgsKwargs, torch2xp, xp2torch\n",
"from thinc.types import Floats2d\n",
"\n",
"def convert_transformer_inputs(model, tokens: TokensPlus, is_train):\n",
" kwargs = {\n",
" \"input_ids\": tokens.input_ids,\n",
" \"attention_mask\": tokens.attention_mask,\n",
" \"token_type_ids\": tokens.token_type_ids,\n",
" }\n",
" return ArgsKwargs(args=(), kwargs=kwargs), lambda dX: []\n",
"\n",
"\n",
"def convert_transformer_outputs(\n",
" model: Model,\n",
" inputs_outputs: Tuple[TokensPlus, Tuple[torch.Tensor]],\n",
" is_train: bool\n",
") -> Tuple[List[Floats2d], Callable]:\n",
" tplus, trf_outputs = inputs_outputs\n",
" wp_vectors = torch2xp(trf_outputs[0])\n",
" tokvecs = [wp_vectors[i, idx] for i, idx in enumerate(tplus.tok2wp)]\n",
"\n",
" def backprop(d_tokvecs: List[Floats2d]) -> ArgsKwargs:\n",
" # Restore entries for BOS and EOS markers\n",
" d_wp_vectors = model.ops.alloc3f(*trf_outputs[0].shape, dtype=\"f\")\n",
" for i, idx in enumerate(tplus.tok2wp):\n",
" d_wp_vectors[i, idx] += d_tokvecs[i]\n",
" return ArgsKwargs(\n",
" args=(trf_outputs[0],),\n",
" kwargs={\"grad_tensors\": xp2torch(d_wp_vectors)},\n",
" )\n",
"\n",
" return tokvecs, backprop"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The input and output transformation functions give you full control of how data is passed into and out of the underlying PyTorch model, so you can work with PyTorch layers that expect and return arbitrary objects. Putting it all together, we now have a nice layer that is configured with the name of a transformer model, that acts as a function mapping tokenized input into feature vectors."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import thinc\n",
"from thinc.api import PyTorchWrapper\n",
"from transformers import AutoModel\n",
"\n",
"@thinc.registry.layers(\"transformers_model.v1\")\n",
"def Transformer(name: str) -> Model[TokensPlus, List[Floats2d]]:\n",
" return PyTorchWrapper(\n",
" AutoModel.from_pretrained(name),\n",
" convert_inputs=convert_transformer_inputs,\n",
" convert_outputs=convert_transformer_outputs,\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can now combine the `TransformersTokenizer` and `Transformer` into a feed-forward network using the `chain` combinator. The `with_array` layer transforms a sequence of data into a contiguous 2d array on the way into and\n",
"out of a model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import chain, with_array, Softmax\n",
"\n",
"@thinc.registry.layers(\"TransformersTagger.v1\")\n",
"def TransformersTagger(starter: str, n_tags: int = 17) -> Model[List[List[str]], List[Floats2d]]:\n",
" return chain(\n",
" TransformersTokenizer(starter),\n",
" Transformer(starter),\n",
" with_array(Softmax(n_tags)),\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"\n",
"## Training the model\n",
"\n",
"### Setting up model and data\n",
"\n",
"Since we've registered all layers via `@thinc.registry.layers`, we can construct the model, its settings and other functions we need from a config (see `CONFIG` above). The result is a config object with a model, an optimizer, a function to calculate the loss and the training settings."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import Config, registry\n",
"\n",
"C = registry.resolve(Config().from_str(CONFIG))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = C[\"model\"]\n",
"optimizer = C[\"optimizer\"]\n",
"calculate_loss = C[\"loss\"]\n",
"cfg = C[\"training\"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We’ve prepared a separate package [`ml-datasets`](https://github.com/explosion/ml-datasets) with loaders for some common datasets, including the AnCora data. If we're using a GPU, calling `ops.asarray` on the outputs ensures that they're converted to `cupy` arrays (instead of `numpy` arrays). Calling `Model.initialize` with a batch of inputs and outputs allows Thinc to **infer the missing dimensions**."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import ml_datasets\n",
"(train_X, train_Y), (dev_X, dev_Y) = ml_datasets.ud_ancora_pos_tags()\n",
"\n",
"train_Y = list(map(model.ops.asarray, train_Y)) # convert to cupy if needed\n",
"dev_Y = list(map(model.ops.asarray, dev_Y)) # convert to cupy if needed\n",
"\n",
"model.initialize(X=train_X[:5], Y=train_Y[:5])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Helper functions for training and evaluation\n",
"\n",
"Before we can train the model, we also need to set up the following helper functions for batching and evaluation:\n",
"\n",
"* **`minibatch_by_words`:** Group pairs of sequences into minibatches under `max_words` in size, considering padding. The size of a padded batch is the length of its longest sequence multiplied by the number of elements in the batch.\n",
"* **`evaluate_sequences`:** Evaluate the model sequences of two-dimensional arrays and return the score."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def minibatch_by_words(pairs, max_words):\n",
" pairs = list(zip(*pairs))\n",
" pairs.sort(key=lambda xy: len(xy[0]), reverse=True)\n",
" batch = []\n",
" for X, Y in pairs:\n",
" batch.append((X, Y))\n",
" n_words = max(len(xy[0]) for xy in batch) * len(batch)\n",
" if n_words >= max_words:\n",
" yield batch[:-1]\n",
" batch = [(X, Y)]\n",
" if batch:\n",
" yield batch\n",
"\n",
"def evaluate_sequences(model, Xs: List[Floats2d], Ys: List[Floats2d], batch_size: int) -> float:\n",
" correct = 0.0\n",
" total = 0.0\n",
" for X, Y in model.ops.multibatch(batch_size, Xs, Ys):\n",
" Yh = model.predict(X)\n",
" for yh, y in zip(Yh, Y):\n",
" correct += (y.argmax(axis=1) == yh.argmax(axis=1)).sum()\n",
" total += y.shape[0]\n",
" return float(correct / total)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### The training loop\n",
"\n",
"Transformers often learn best with **large batch sizes** – larger than fits in GPU memory. But you don't have to backprop the whole batch at once. Here we consider the \"logical\" batch size (number of examples per update) separately from the physical batch size. For the physical batch size, what we care about is the **number of words** (considering padding too). We also want to sort by length, for efficiency. \n",
"\n",
"At the end of the batch, we **call the optimizer** with the accumulated gradients, and **advance the learning rate schedules**. You might want to evaluate more often than once per epoch – that's up to you."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tqdm.notebook import tqdm\n",
"from thinc.api import fix_random_seed\n",
"\n",
"fix_random_seed(0)\n",
"\n",
"for epoch in range(cfg[\"n_epoch\"]):\n",
" batches = model.ops.multibatch(cfg[\"batch_size\"], train_X, train_Y, shuffle=True)\n",
" for outer_batch in tqdm(batches, leave=False):\n",
" for batch in minibatch_by_words(outer_batch, cfg[\"words_per_subbatch\"]):\n",
" inputs, truths = zip(*batch)\n",
" inputs = list(inputs)\n",
" guesses, backprop = model(inputs, is_train=True)\n",
" backprop(calculate_loss.get_grad(guesses, truths))\n",
" model.finish_update(optimizer)\n",
" optimizer.step_schedules()\n",
" score = evaluate_sequences(model, dev_X, dev_Y, cfg[\"batch_size\"])\n",
" print(epoch, f\"{score:.3f}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you like, you can call `model.to_disk` or `model.to_bytes` to save the model weights to a directory or a bytestring."
]
  },
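  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A sketch: save the trained weights to disk. The path \"tagger_model\" is\n",
    "# just an arbitrary example.\n",
    "model.to_disk(\"tagger_model\")"
   ]
  }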
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
thinc-release-v9.1.1/examples/03_pos_tagger_basic_cnn.ipynb 0000664 0000000 0000000 00000032412 14670643317 0023764 0 ustar 00root root 0000000 0000000 {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Basic CNN part-of-speech tagger with Thinc\n",
"\n",
"This notebook shows how to implement a basic CNN for part-of-speech tagging model in Thinc (without external dependencies) and train the model on the Universal Dependencies [AnCora corpus](https://github.com/UniversalDependencies/UD_Spanish-AnCora). The tutorial shows three different workflows:\n",
"\n",
"1. Composing the model **in code** (basic usage)\n",
"2. Composing the model **via a config file only** (mostly to demonstrate advanced usage of configs)\n",
"3. Composing the model **in code and configuring it via config** (recommended)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install \"thinc>=8.0.0\" \"ml_datasets>=0.2.0\" \"tqdm>=4.41\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We start by making sure the computation is performed on GPU if available. `prefer_gpu` should be called right after importing Thinc, and it returns a boolean indicating whether the GPU has been activated."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import prefer_gpu\n",
"\n",
"prefer_gpu()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We also define the following helper functions for loading the data, and training and evaluating a given model. Don't forget to call `model.initialize` with a batch of input and output data to initialize the model and fill in any missing shapes."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import ml_datasets\n",
"from tqdm.notebook import tqdm\n",
"from thinc.api import fix_random_seed\n",
"\n",
"fix_random_seed(0)\n",
"\n",
"def train_model(model, optimizer, n_iter, batch_size):\n",
" (train_X, train_y), (dev_X, dev_y) = ml_datasets.ud_ancora_pos_tags()\n",
" model.initialize(X=train_X[:5], Y=train_y[:5])\n",
" for n in range(n_iter):\n",
" loss = 0.0\n",
" batches = model.ops.multibatch(batch_size, train_X, train_y, shuffle=True)\n",
" for X, Y in tqdm(batches, leave=False):\n",
" Yh, backprop = model.begin_update(X)\n",
" d_loss = []\n",
" for i in range(len(Yh)):\n",
" d_loss.append(Yh[i] - Y[i])\n",
" loss += ((Yh[i] - Y[i]) ** 2).sum()\n",
" backprop(d_loss)\n",
" model.finish_update(optimizer)\n",
" score = evaluate(model, dev_X, dev_y, batch_size)\n",
" print(f\"{n}\\t{loss:.2f}\\t{score:.3f}\")\n",
" \n",
"def evaluate(model, dev_X, dev_Y, batch_size):\n",
" correct = 0\n",
" total = 0\n",
" for X, Y in model.ops.multibatch(batch_size, dev_X, dev_Y):\n",
" Yh = model.predict(X)\n",
" for yh, y in zip(Yh, Y):\n",
" correct += (y.argmax(axis=1) == yh.argmax(axis=1)).sum()\n",
" total += y.shape[0]\n",
" return float(correct / total)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"\n",
"## 1. Composing the model in code\n",
"\n",
"Here's the model definition, using the `>>` operator for the `chain` combinator. The `strings2arrays` transform converts a sequence of strings to a list of arrays. `with_array` transforms sequences (the sequences of arrays) into a contiguous 2-dimensional array on the way into and out of the model it wraps. This means our model has the following signature: `Model[Sequence[str], Sequence[Array2d]]`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import Model, chain, strings2arrays, with_array, HashEmbed, expand_window, Relu, Softmax, Adam, warmup_linear\n",
"\n",
"width = 32\n",
"vector_width = 16\n",
"nr_classes = 17\n",
"learn_rate = 0.001\n",
"n_iter = 10\n",
"batch_size = 128\n",
"\n",
"with Model.define_operators({\">>\": chain}):\n",
" model = strings2arrays() >> with_array(\n",
" HashEmbed(nO=width, nV=vector_width, column=0)\n",
" >> expand_window(window_size=1)\n",
" >> Relu(nO=width, nI=width * 3)\n",
" >> Relu(nO=width, nI=width)\n",
" >> Softmax(nO=nr_classes, nI=width)\n",
" )\n",
"optimizer = Adam(learn_rate)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_model(model, optimizer, n_iter, batch_size)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"\n",
"## Composing the model via a config file\n",
"\n",
"Thinc's config system lets describe **arbitrary trees of objects**. The config can include values like hyperparameters or training settings, or references to functions and the values of their arguments. Thinc will then construct the config **bottom-up** – so you can define one function with its arguments, and then pass the return value into another function.\n",
"\n",
"If we want to rebuild the model defined above in a config file, we first need to break down its structure:\n",
"\n",
"* `chain` (any number of positional arguments)\n",
" * `strings2arrays` (no arguments)\n",
" * `with_array` (one argument **layer**)\n",
" * **layer:** `chain` (any number of positional arguments)\n",
" * `HashEmbed`\n",
" * `expand_window`\n",
" * `Relu`\n",
" * `Relu`\n",
" * `Softmax`\n",
"\n",
"`chain` takes a variable number of positional arguments (the layers to compose). In the config, positional arguments can be expressed using `*` in the dot notation. For example, `model.layer` could describe a function passed to `model` as the argument `layer`, while `model.*.relu` defines a positional argument passed to `model`. The name of the argument, e.g. `relu` – doesn't matter in this case. It just needs to be unique.\n",
"\n",
"> ⚠️ **Important note:** This example is mostly intended to show what's possible. We don't recommend \"programming via config files\" as shown here, since it doesn't really solve any problem and makes the model definition just as complicated. Instead, we recommend a hybrid approach: wrap the model definition in a registered function and configure it via the config."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"CONFIG = \"\"\"\n",
"[hyper_params]\n",
"width = 32\n",
"vector_width = 16\n",
"learn_rate = 0.001\n",
"\n",
"[training]\n",
"n_iter = 10\n",
"batch_size = 128\n",
"\n",
"[model]\n",
"@layers = \"chain.v1\"\n",
"\n",
"[model.*.strings2arrays]\n",
"@layers = \"strings2arrays.v1\"\n",
"\n",
"[model.*.with_array]\n",
"@layers = \"with_array.v1\"\n",
"\n",
"[model.*.with_array.layer]\n",
"@layers = \"chain.v1\"\n",
"\n",
"[model.*.with_array.layer.*.hashembed]\n",
"@layers = \"HashEmbed.v1\"\n",
"nO = ${hyper_params:width}\n",
"nV = ${hyper_params:vector_width}\n",
"column = 0\n",
"\n",
"[model.*.with_array.layer.*.expand_window]\n",
"@layers = \"expand_window.v1\"\n",
"window_size = 1\n",
"\n",
"[model.*.with_array.layer.*.relu1]\n",
"@layers = \"Relu.v1\"\n",
"nO = ${hyper_params:width}\n",
"nI = 96\n",
"\n",
"[model.*.with_array.layer.*.relu2]\n",
"@layers = \"Relu.v1\"\n",
"nO = ${hyper_params:width}\n",
"nI = ${hyper_params:width}\n",
"\n",
"[model.*.with_array.layer.*.softmax]\n",
"@layers = \"Softmax.v1\"\n",
"nO = 17\n",
"nI = ${hyper_params:width}\n",
"\n",
"[optimizer]\n",
"@optimizers = \"Adam.v1\"\n",
"learn_rate = ${hyper_params:learn_rate}\n",
"\"\"\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"When the config is loaded, it's first parsed as a dictionary and all references to values from other sections, e.g. `${hyper_params:width}` are replaced. The result is a nested dictionary describing the objects defined in the config."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import registry, Config\n",
"\n",
"config = Config().from_str(CONFIG)\n",
"config"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"`registry.resolve` then creates the objects and calls the functions **bottom-up**."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"C = registry.resolve(config)\n",
"C"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We now have a model, optimizer and training settings, built from the config, and can use them to train the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = C[\"model\"]\n",
"optimizer = C[\"optimizer\"]\n",
"n_iter = C[\"training\"][\"n_iter\"]\n",
"batch_size = C[\"training\"][\"batch_size\"]\n",
"train_model(model, optimizer, n_iter, batch_size)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"\n",
"## Composing the model with code and config\n",
"\n",
"The `@thinc.registry` decorator lets you register your own layers and model definitions, which can then be referenced in config files. This approach gives you the most flexibility, while also keeping your config and model definitions concise.\n",
"\n",
"> 💡 The function you register will be filled in by the config – e.g. the value of `width` defined in the config block will be passed in as the argument `width`. If arguments are missing, you'll see a validation error. If you're using **type hints** in the function, the values will be parsed to ensure they always have the right type. If they're invalid – e.g. if you're passing in a list as the value of `width` – you'll see an error. This makes it easier to prevent bugs caused by incorrect values lower down in the network."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import thinc\n",
"from thinc.api import Model, chain, strings2arrays, with_array, HashEmbed, expand_window, Relu, Softmax, Adam, warmup_linear\n",
"\n",
"@thinc.registry.layers(\"cnn_tagger.v1\")\n",
"def create_cnn_tagger(width: int, vector_width: int, nr_classes: int = 17):\n",
" with Model.define_operators({\">>\": chain}):\n",
" model = strings2arrays() >> with_array(\n",
" HashEmbed(nO=width, nV=vector_width, column=0)\n",
" >> expand_window(window_size=1)\n",
" >> Relu(nO=width, nI=width * 3)\n",
" >> Relu(nO=width, nI=width)\n",
" >> Softmax(nO=nr_classes, nI=width)\n",
" )\n",
" return model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The config would then only need to define one model block with `@layers = \"cnn_tagger.v1\"` and the function arguments. Whether you move them out to a section like `[hyper_params]` or just hard-code them into the block is up to you. The advantage of a separate section is that the values are **preserved in the parsed config object** (and not just passed into the function), so you can always print and view them."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"CONFIG = \"\"\"\n",
"[hyper_params]\n",
"width = 32\n",
"vector_width = 16\n",
"learn_rate = 0.001\n",
"\n",
"[training]\n",
"n_iter = 10\n",
"batch_size = 128\n",
"\n",
"[model]\n",
"@layers = \"cnn_tagger.v1\"\n",
"width = ${hyper_params:width}\n",
"vector_width = ${hyper_params:vector_width}\n",
"nr_classes = 17\n",
"\n",
"[optimizer]\n",
"@optimizers = \"Adam.v1\"\n",
"learn_rate = ${hyper_params:learn_rate}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"C = registry.resolve(Config().from_str(CONFIG))\n",
"C"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = C[\"model\"]\n",
"optimizer = C[\"optimizer\"]\n",
"n_iter = C[\"training\"][\"n_iter\"]\n",
"batch_size = C[\"training\"][\"batch_size\"]\n",
"train_model(model, optimizer, n_iter, batch_size)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
thinc-release-v9.1.1/examples/03_textcat_basic_neural_bow.ipynb 0000664 0000000 0000000 00000021042 14670643317 0024662 0 ustar 00root root 0000000 0000000 {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Basic neural bag-of-words text classifier with Thinc\n",
"\n",
"This notebook shows how to implement a simple neural text classification model in Thinc. Last tested with `thinc==8.0.13`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install thinc syntok \"ml_datasets>=0.2.0\" tqdm"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For simple and standalone tokenization, we'll use the [`syntok`](https://github.com/fnl/syntok) package and the following function:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from syntok.tokenizer import Tokenizer\n",
"\n",
"def tokenize_texts(texts):\n",
" tok = Tokenizer()\n",
" return [[token.value for token in tok.tokenize(text)] for text in texts]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setting up the data\n",
"\n",
"The `load_data` function loads the DBPedia Ontology dataset, converts and tokenizes the data and generates a simple vocabulary mapping. Instead of `ml_datasets.dbpedia` you can also try `ml_datasets.imdb` for the IMDB review dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import ml_datasets\n",
"import numpy\n",
"\n",
"def load_data():\n",
" train_data, dev_data = ml_datasets.dbpedia(train_limit=2000, dev_limit=2000)\n",
" train_texts, train_cats = zip(*train_data)\n",
" dev_texts, dev_cats = zip(*dev_data)\n",
" unique_cats = list(numpy.unique(numpy.concatenate((train_cats, dev_cats))))\n",
" nr_class = len(unique_cats)\n",
" print(f\"{len(train_data)} training / {len(dev_data)} dev\\n{nr_class} classes\")\n",
"\n",
" train_y = numpy.zeros((len(train_cats), nr_class), dtype=\"f\")\n",
" for i, cat in enumerate(train_cats):\n",
" train_y[i][unique_cats.index(cat)] = 1\n",
" dev_y = numpy.zeros((len(dev_cats), nr_class), dtype=\"f\")\n",
" for i, cat in enumerate(dev_cats):\n",
" dev_y[i][unique_cats.index(cat)] = 1\n",
"\n",
" train_tokenized = tokenize_texts(train_texts)\n",
" dev_tokenized = tokenize_texts(dev_texts)\n",
" # Generate simple vocab mapping, is 0\n",
" vocab = {}\n",
" count_id = 1\n",
" for text in train_tokenized:\n",
" for token in text:\n",
" if token not in vocab:\n",
" vocab[token] = count_id\n",
" count_id += 1\n",
" # Map texts using vocab\n",
" train_X = []\n",
" for text in train_tokenized:\n",
" train_X.append(numpy.array([vocab.get(t, 0) for t in text]))\n",
" dev_X = []\n",
" for text in dev_tokenized:\n",
" dev_X.append(numpy.array([vocab.get(t, 0) for t in text]))\n",
" return (train_X, train_y), (dev_X, dev_y), vocab"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Defining the model and config\n",
"\n",
"The model takes a list of 2-dimensional arrays (the tokenized texts mapped to vocab IDs) and outputs a 2d array. Because the embed layer's `nV` dimension (the number of entries in the lookup table) depends on the vocab and the training data, it's passed in as an argument and registered as a **reference**. This makes it easy to retrieve it later on by calling `model.get_ref(\"embed\")`, so we can set its `nV` dimension."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import List\n",
"import thinc\n",
"from thinc.api import Model, chain, list2ragged, with_array, reduce_mean, Softmax\n",
"from thinc.types import Array2d\n",
"\n",
"@thinc.registry.layers(\"EmbedPoolTextcat.v1\")\n",
"def EmbedPoolTextcat(embed: Model[Array2d, Array2d]) -> Model[List[Array2d], Array2d]:\n",
" with Model.define_operators({\">>\": chain}):\n",
" model = with_array(embed) >> list2ragged() >> reduce_mean() >> Softmax()\n",
" model.set_ref(\"embed\", embed)\n",
" return model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The config defines the top-level model using the registered `EmbedPoolTextcat` function, and the `embed` argument, referencing the `Embed` layer."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"CONFIG = \"\"\"\n",
"[hyper_params]\n",
"width = 64\n",
"\n",
"[model]\n",
"@layers = \"EmbedPoolTextcat.v1\"\n",
"\n",
"[model.embed]\n",
"@layers = \"Embed.v1\"\n",
"nO = ${hyper_params:width}\n",
"\n",
"[optimizer]\n",
"@optimizers = \"Adam.v1\"\n",
"learn_rate = 0.001\n",
"\n",
"[training]\n",
"batch_size = 8\n",
"n_iter = 10\n",
"\"\"\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Training setup\n",
"\n",
"When the config is loaded, it's first parsed as a dictionary and all references to values from other sections, e.g. `${hyper_params:width}` are replaced. The result is a nested dictionary describing the objects defined in the config. `registry.resolve` then creates the objects and calls the functions **bottom-up**."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import registry, Config\n",
"\n",
"C = registry.resolve(Config().from_str(CONFIG))\n",
"C"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once the data is loaded, we'll know the vocabulary size and can set the dimension on the embedding layer. `model.get_ref(\"embed\")` returns the layer defined as the ref `\"embed\"` and the `set_dim` method lets you set a value for a dimension. To fill in the other missing shapes, we can call `model.initialize` with some input and output data. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"(train_X, train_y), (dev_X, dev_y), vocab = load_data()\n",
"\n",
"batch_size = C[\"training\"][\"batch_size\"]\n",
"optimizer = C[\"optimizer\"]\n",
"model = C[\"model\"]\n",
"model.get_ref(\"embed\").set_dim(\"nV\", len(vocab) + 1)\n",
"\n",
"model.initialize(X=train_X, Y=train_y)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def evaluate_model(model, dev_X, dev_Y, batch_size):\n",
" correct = 0.0\n",
" total = 0.0\n",
" for X, Y in model.ops.multibatch(batch_size, dev_X, dev_Y):\n",
" Yh = model.predict(X)\n",
" for j in range(len(Yh)):\n",
" correct += Yh[j].argmax(axis=0) == Y[j].argmax(axis=0)\n",
" total += len(Y)\n",
" return float(correct / total)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"\n",
"## Training the model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import fix_random_seed\n",
"from tqdm.notebook import tqdm\n",
"\n",
"fix_random_seed(0)\n",
"for n in range(C[\"training\"][\"n_iter\"]):\n",
" loss = 0.0\n",
" batches = model.ops.multibatch(batch_size, train_X, train_y, shuffle=True)\n",
" for X, Y in tqdm(batches, leave=False):\n",
" Yh, backprop = model.begin_update(X)\n",
" d_loss = []\n",
" for i in range(len(Yh)):\n",
" d_loss.append(Yh[i] - Y[i])\n",
" loss += ((Yh[i] - Y[i]) ** 2).sum()\n",
" backprop(numpy.array(d_loss))\n",
" model.finish_update(optimizer)\n",
" score = evaluate_model(model, dev_X, dev_y, batch_size)\n",
" print(f\"{n}\\t{loss:.2f}\\t{score:.3f}\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
thinc-release-v9.1.1/examples/04_configure_gpu_memory.ipynb 0000664 0000000 0000000 00000010457 14670643317 0024065 0 ustar 00root root 0000000 0000000 {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Using a single memory pool for Cupy and PyTorch or TensorFlow\n",
"\n",
"Requesting memory from a GPU device directly is expensive, so most deep learning libraries will over-allocate, and maintain an internal pool of memory they will keep a hold of, instead of returning it back to the device. This means the libraries don't by default play well together: they all expect to be the single consumer of the GPU memory, so they hog it selfishly. If you use two frameworks together, you can get unexpected out-of-memory errors.\n",
"\n",
"Thinc's internal models use cupy for GPU operations, and cupy offers a nice solution for this problem. You can provide cupy with a custom memory allocation function, which allows us to route cupy's memory requests via another library. This avoids the memory problem when you use PyTorch and cupy together, or when you use cupy and Tensorflow together. We don't yet have a similar solution for using PyTorch and Tensorflow together, however.\n",
"\n",
"To start with, we call the `require_gpu()` function, which tells Thinc and PyTorch to allocate on GPU."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install \"thinc>=8.0.0\" torch \"tensorflow>=2.0\" "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import require_gpu\n",
"\n",
"require_gpu()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We then call `use_pytorch_for_gpu_memory()` to set up the allocation strategy. Now when `cupy` tries to request GPU memory, it will do so by asking PyTorch, rather than asking the GPU directly."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import use_pytorch_for_gpu_memory\n",
"\n",
"use_pytorch_for_gpu_memory()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To test that it's working, we make a little function that allocates an array using cupy, and prints its size, along with the current size of PyTorch's memory pool. Notice the over-allocation: PyTorch grabs a *much* bigger chunk of memory than just our little array. That's why we need to have only one memory pool."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import cupy \n",
"import torch.cuda\n",
"\n",
"def allocate_cupy_tensor(size):\n",
" array = cupy.zeros((size,), dtype=\"f\")\n",
" print(array.size, torch.cuda.max_memory_allocated())\n",
" return array\n",
"allocate_cupy_tensor(16)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can also see that even when we free the tensor, the memory isn't immediately released. On the other hand, we don't need to resize the memory pool when we make a second small allocation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tensorflow\n",
"\n",
"with tensorflow.device('/device:GPU:0'):\n",
" arr = allocate_cupy_tensor(1000)\n",
" arr = None\n",
" arr = allocate_cupy_tensor(1000)\n",
" arr = None"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If we make a huge allocation, we'll have to resize the pool though. Let's make sure the pool resizes properly, and that memory is freed when the tensors are removed."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"arr = allocate_cupy_tensor(1000)\n",
"for _ in range(100):\n",
" arr2 = allocate_cupy_tensor(900000)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
thinc-release-v9.1.1/examples/05_benchmarking_layers.ipynb 0000664 0000000 0000000 00000017374 14670643317 0023656 0 ustar 00root root 0000000 0000000 {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Benchmarking Thinc layers with a custom `benchmark` layer\n",
"\n",
"This notebook shows how to write a `benchmark` layer that can wrap any layer(s) in your network and that **logs the execution times** of the initialization, forward pass and backward pass. The benchmark layer can also be mapped to an operator like `@` to make it easy to add debugging to your network."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install \"thinc>=8.0.0\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To log the results, we first set up a custom logger using Python's `logging` module. You could also just print the stats instead, but using `logging` is cleaner, since it lets other users modify the logger's behavior more easily, and separates the logs from other output and write it to a file (e.g. if you're benchmarking several layers during training). The following logging config will output the date and time, the name of the logger and the logged results."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"\n",
"logger = logging.getLogger(\"thinc:benchmark\")\n",
"if not logger.hasHandlers(): # prevent Jupyter from adding multiple loggers\n",
" formatter = logging.Formatter('%(asctime)s %(name)s %(message)s', datefmt=\"%Y-%m-%d %H:%M:%S\")\n",
" handler = logging.StreamHandler()\n",
" handler.setFormatter(formatter)\n",
" logger.addHandler(handler)\n",
" logger.setLevel(logging.DEBUG)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here's a minimalistic time logger that can be initialized with the name of a given layer, and can track several events (e.g. `\"forward\"` and `\"backward\"`). When the `TimeLogger.end` method is called, the output is formatted nicely and the elapsed time is logged with the logger name and colored label."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from timeit import default_timer\n",
"from wasabi import color\n",
"\n",
"\n",
"class TimeLogger:\n",
" def __init__(self, name):\n",
" self.colors = {\"forward\": \"green\", \"backward\": \"blue\"}\n",
" self.name = name\n",
" self.timers = {}\n",
" \n",
" def start(self, name):\n",
" self.timers[name] = default_timer()\n",
" \n",
" def end(self, name):\n",
" result = default_timer() - self.timers[name]\n",
" label = f\"{name.upper():<8}\"\n",
" label = color(label, self.colors.get(name), bold=True)\n",
" logger.debug(f\"{self.name:<12} | {label} | {result:.6f}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `benchmark` layer now has to wrap the forward pass, backward pass and initialization of the layer it wraps and log the execution times. It then returns a Thinc model instance with the custom `forward` function and a custom `init` function. We'll also allow setting a custom `name` to make it easier to tell multiple wrapped benchmark layers apart."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import Model\n",
" \n",
"def benchmark(layer, name=None):\n",
" name = name if name is not None else layer.name\n",
" t = TimeLogger(name)\n",
" \n",
" def init(model, X, Y):\n",
" t.start(\"init\")\n",
" result = layer.initialize(X, Y)\n",
" t.end(\"init\")\n",
" return result\n",
" \n",
" def forward(model, X, is_train):\n",
" t.start(\"forward\")\n",
" layer_Y, layer_callback = layer(X, is_train=is_train)\n",
" t.end(\"forward\")\n",
" \n",
" def backprop(dY):\n",
" t.start(\"backward\")\n",
" result = layer_callback(dY)\n",
" t.end(\"backward\")\n",
" return result\n",
" \n",
" return layer_Y, backprop\n",
" \n",
" return Model(f\"benchmark:{layer.name}\", forward, init=init) "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"\n",
"## Usage examples\n",
"\n",
"### Using the `benchmark` layer as a function\n",
"\n",
"We can now wrap one or more layers (including nested layers) with the `benchmark` function. This is the original model:\n",
"\n",
"```python\n",
"model = chain(Linear(1), Linear(1))\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy\n",
"from thinc.api import chain, Linear\n",
"\n",
"X = numpy.zeros((1, 2), dtype=\"f\")\n",
"\n",
"model = benchmark(chain(benchmark(Linear(1)), Linear(1)), name=\"outer\")\n",
"model.initialize(X=X)\n",
"Y, backprop = model(X, is_train=False)\n",
"dX = backprop(Y)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Using the `benchmark` layer as an operator\n",
"\n",
"Alternatively, we can also use `Model.define_operators` to map `benchmark` to an operator like `@`. The left argument of the operator is the first argument passed into the function (the layer) and the right argument is the second argument (the name). The following example wraps the whole network (two chained `Linear` layers) in a benchmark layer named `\"outer\"`, and the first `Linear` layer in a benchmark layer named `\"first\"`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"from thinc.api import Model\n",
"\n",
"with Model.define_operators({\">>\": chain, \"@\": benchmark}):\n",
" model = (Linear(1) @ \"first\" >> Linear(1)) @ \"outer\"\n",
" \n",
"model.initialize(X=X)\n",
"Y, backprop = model(X, is_train=True)\n",
"dX = backprop(Y)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Using the `benchmark` layer during training"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import Model, chain, Relu, Softmax, Adam\n",
"\n",
"n_hidden = 32\n",
"dropout = 0.2\n",
"\n",
"with Model.define_operators({\">>\": chain, \"@\": benchmark}):\n",
" model = (\n",
" Relu(nO=n_hidden, dropout=dropout) @ \"relu1\"\n",
" >> Relu(nO=n_hidden, dropout=dropout) @ \"relu2\"\n",
" >> Softmax()\n",
" )\n",
"\n",
"train_X = numpy.zeros((5, 784), dtype=\"f\")\n",
"train_Y = numpy.zeros((540, 10), dtype=\"f\")\n",
"\n",
"model.initialize(X=train_X[:5], Y=train_Y[:5])\n",
"optimizer = Adam(0.001)\n",
"for i in range(10):\n",
" for X, Y in model.ops.multibatch(8, train_X, train_Y, shuffle=True):\n",
" Yh, backprop = model.begin_update(X)\n",
" backprop(Yh - Y)\n",
" model.finish_update(optimizer)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
thinc-release-v9.1.1/examples/05_visualizing_models.ipynb 0000664 0000000 0000000 00000013465 14670643317 0023553 0 ustar 00root root 0000000 0000000 {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Visualizing Thinc models (with shape inference)\n",
"\n",
"This is a simple notebook showing how you can easily visualize your Thinc models and their inputs and outputs using [Graphviz](https://www.graphviz.org/) and [`pydot`](https://github.com/pydot/pydot). If you're installing `pydot` via the notebook, make sure to restart your kernel (or Google Colab VM – [see here](https://stackoverflow.com/questions/49853303/how-to-install-pydot-graphviz-on-google-colab) for details) after installation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"!pip install \"thinc>=8.0.0\" pydot graphviz svgwrite"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's start by defining a model with a number of layers chained together using the `chain` combinator:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import chain, expand_window, Relu, Maxout, Linear, Softmax\n",
"\n",
"n_hidden = 32\n",
"dropout = 0.2\n",
"\n",
"model= chain(\n",
" expand_window(3),\n",
" Relu(nO=n_hidden, dropout=dropout, normalize=True),\n",
" Maxout(nO=n_hidden * 4),\n",
" Linear(nO=n_hidden * 2),\n",
" Relu(nO=n_hidden, dropout=dropout, normalize=True),\n",
" Linear(nO=n_hidden),\n",
" Relu(nO=n_hidden, dropout=dropout),\n",
" Softmax(),\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here's the visualization we want to achieve for this model: the **name of the layer** or combination of layers and the **input and output dimensions**. Note that `>>` refers to a chaining of layers.\n",
"\n",
"\n",
"\n",
"This means we need to add a node for each layer, edges connecting the nodes to the previous node (except for the first/last), and labels like `\"name|(nO,nI)\"` – for instance, `\"maxout|(128,32)\"`. Here's a simple function that takes a Thinc layer (i.e. a `Model` instance) and returns a label with the layer name and its dimensions, if available:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_label(layer):\n",
" layer_name = layer.name\n",
" nO = layer.get_dim(\"nO\") if layer.has_dim(\"nO\") else \"?\"\n",
" nI = layer.get_dim(\"nI\") if layer.has_dim(\"nI\") else \"?\"\n",
" return f\"{layer.name}|({nO}, {nI})\".replace(\">\", \">\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can now use `pydot` to create a visualization for a given model. You can customize the direction of the notes by setting `\"rankdir\"` (e.g. `\"TB\"` for \"top to bottom\") and adjust the font and arrow styling. To make the visualization render nicely in a notebook, we can call into IPython's utilities."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pydot\n",
"from IPython.display import SVG, display\n",
"\n",
"def visualize_model(model):\n",
" dot = pydot.Dot()\n",
" dot.set(\"rankdir\", \"LR\")\n",
" dot.set_node_defaults(shape=\"record\", fontname=\"arial\", fontsize=\"10\")\n",
" dot.set_edge_defaults(arrowsize=\"0.7\")\n",
" nodes = {}\n",
" for i, layer in enumerate(model.layers):\n",
" label = get_label(layer)\n",
" node = pydot.Node(layer.id, label=label)\n",
" dot.add_node(node)\n",
" nodes[layer.id] = node\n",
" if i == 0:\n",
" continue\n",
" from_node = nodes[model.layers[i - 1].id]\n",
" to_node = nodes[layer.id]\n",
" if not dot.get_edge(from_node, to_node):\n",
" dot.add_edge(pydot.Edge(from_node, to_node))\n",
" display(SVG(dot.create_svg()))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Calling `visualize_model` on the model defined above will render the visualization. However, most dimensions will now show up as `(?, ?)`, instead of the *actual* dimensions as shown in the graph above. That's because Thinc allows **defining models with missing shapes** and is able to **infer the missing shapes from the data** when you call `model.initialize`. The model visualized here doesn't define all its shapes, so the labels are incomplete."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"visualize_model(model)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To fill in the missing shapes, we can call `model.initialize` with examples of the expected input `X` and output `Y`. Running `visualize_model` again now shows the complete shapes."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy\n",
"\n",
"X = numpy.zeros((5, 784), dtype=\"f\")\n",
"Y = numpy.zeros((54000, 10), dtype=\"f\")\n",
"model.initialize(X=X, Y=Y)\n",
"\n",
"visualize_model(model)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
thinc-release-v9.1.1/examples/06_predicting_like_terms.ipynb 0000664 0000000 0000000 00000054310 14670643317 0024205 0 ustar 00root root 0000000 0000000 {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Predicting Like Polynomial Terms\n",
"\n",
"Remember in Algebra how you had to combine \"like terms\" to simplify problems? \n",
"\n",
"You'd see expressions such as `60 + 2x^3 - 6x + x^3 + 17x` in which there are **5** total terms but only **4** are \"like terms\". \n",
"\n",
"`2x^3` and `x^3` are like, and `-6x` and `17x` are like, while `60` doesn't have any like siblings.\n",
"\n",
"Can we teach a model to predict that there are `4` like terms in the above expression?\n",
"\n",
"Let's give it a shot using [Mathy](https://mathy.ai) to generate math problems and [thinc](https://github.com/explosion/thinc) to build a regression model that outputs the number of like terms in each input problem."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install \"thinc>=8.0.0\" mathy_core"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Sketch a Model\n",
"\n",
"Before we get started it can be good to have an idea of what input/output shapes we want for our model.\n",
"\n",
"We'll convert text math problems into lists of lists of integers, so our example (X) type can be represented using thinc's `Ints2d` type.\n",
"\n",
"The model will predict how many like terms there are in each sequence, so our output (Y) type can represented with the `Floats2d` type.\n",
"\n",
"Knowing the thinc types we want enables us to create an alias for our model, so we only have to type out the verbose generic signature once."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import List\n",
"from thinc.api import Model\n",
"from thinc.types import Ints2d, Floats1d\n",
"\n",
"ModelX = Ints2d\n",
"ModelY = Floats1d\n",
"ModelT = Model[List[ModelX], ModelY]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Encode Text Inputs\n",
"\n",
"Mathy generates ascii-math problems and we have to encode them into integers that the model can process. \n",
"\n",
"To do this we'll build a vocabulary of all the possible characters we'll see, and map each input character to its index in the list.\n",
"\n",
"For math problems our vocabulary will include all the characters of the alphabet, numbers 0-9, and special characters like `*`, `-`, `.`, etc."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import List\n",
"from thinc.api import Model\n",
"from thinc.types import Ints2d, Floats1d\n",
"from thinc.api import Ops, get_current_ops\n",
"\n",
"vocab = \" .+-/^*()[]-01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n",
"\n",
"def encode_input(text: str) -> ModelX:\n",
" ops: Ops = get_current_ops()\n",
" indices: List[List[int]] = []\n",
" for c in text:\n",
" if c not in vocab:\n",
" raise ValueError(f\"'{c}' missing from vocabulary in text: {text}\")\n",
" indices.append([vocab.index(c)])\n",
" return ops.asarray2i(indices)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Try It\n",
"\n",
"Let's try it out on some fixed data to be sure it works. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"outputs = encode_input(\"4+2\")\n",
"assert outputs[0][0] == vocab.index(\"4\")\n",
"assert outputs[1][0] == vocab.index(\"+\")\n",
"assert outputs[2][0] == vocab.index(\"2\")\n",
"print(outputs)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Generate Math Problems\n",
"\n",
"We'll use Mathy to generate random polynomial problems with a variable number of like terms. The generated problems will act as training data for our model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import List, Optional, Set\n",
"import random\n",
"from mathy_core.problems import gen_simplify_multiple_terms\n",
"\n",
"def generate_problems(number: int, exclude: Optional[Set[str]] = None) -> List[str]:\n",
" if exclude is None:\n",
" exclude = set()\n",
" problems: List[str] = []\n",
" while len(problems) < number:\n",
" text, complexity = gen_simplify_multiple_terms(\n",
" random.randint(2, 6),\n",
" noise_probability=1.0,\n",
" noise_terms=random.randint(2, 10),\n",
" op=[\"+\", \"-\"],\n",
" )\n",
" assert text not in exclude, \"duplicate problem generated!\"\n",
" exclude.add(text)\n",
" problems.append(text)\n",
" return problems"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Try It"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"generate_problems(10)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Count Like Terms\n",
"\n",
"Now that we can generate input problems, we'll need a function that can count the like terms in each one and return the value for use as a label.\n",
"\n",
"To accomplish this we'll use a few helpers from mathy to enumerate the terms and compare them to see if they're like."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import Optional, List, Dict\n",
"from mathy_core import MathExpression, ExpressionParser, get_terms, get_term_ex, TermEx\n",
"from mathy_core.problems import mathy_term_string\n",
"\n",
"parser = ExpressionParser()\n",
"\n",
"def count_like_terms(input_problem: str) -> int:\n",
" expression: MathExpression = parser.parse(input_problem)\n",
" term_nodes: List[MathExpression] = get_terms(expression)\n",
" node_groups: Dict[str, List[MathExpression]] = {}\n",
" for term_node in term_nodes:\n",
" ex: Optional[TermEx] = get_term_ex(term_node)\n",
" assert ex is not None, f\"invalid expression {term_node}\"\n",
" key = mathy_term_string(variable=ex.variable, exponent=ex.exponent)\n",
" if key == \"\":\n",
" key = \"const\"\n",
" if key not in node_groups:\n",
" node_groups[key] = [term_node]\n",
" else:\n",
" node_groups[key].append(term_node)\n",
" like_terms = 0\n",
" for k, v in node_groups.items():\n",
" if len(v) <= 1:\n",
" continue\n",
" like_terms += len(v)\n",
" return like_terms"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Try It"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"assert count_like_terms(\"4x - 2y + q\") == 0\n",
"assert count_like_terms(\"x + x + z\") == 2\n",
"assert count_like_terms(\"4x + 2x - x + 7\") == 3"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Generate Problem/Answer pairs\n",
"\n",
"Now that we can generate problems, count the number of like terms in them, and encode their text into integers, we have the pieces required to generate random problems and answers that we can train a neural network with.\n",
"\n",
"Let's write a function that will return a tuple of: the problem text, its encoded example form, and the output label."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import Tuple\n",
"from thinc.api import Ops, get_current_ops\n",
"\n",
"def to_example(input_problem: str) -> Tuple[str, ModelX, ModelY]:\n",
" ops: Ops = get_current_ops()\n",
" encoded_input = encode_input(input_problem)\n",
" like_terms = count_like_terms(input_problem)\n",
" return input_problem, encoded_input, ops.asarray1f([like_terms])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Try It"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"text, X, Y = to_example(\"x+2x\")\n",
"assert text == \"x+2x\"\n",
"assert X[0] == vocab.index(\"x\")\n",
"assert Y[0] == 2\n",
"print(text, X, Y)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Build a Model\n",
"\n",
"Now that we can generate X/Y values, let's define our model and verify that it can process a single input/output.\n",
"\n",
"For this we'll use Thinc and the `define_operators` context manager to connect the pieces together using overloaded operators for `chain` and `clone` operations."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import List\n",
"from thinc.model import Model\n",
"from thinc.api import concatenate, chain, clone, list2ragged\n",
"from thinc.api import reduce_sum, Mish, with_array, Embed, residual\n",
"\n",
"def build_model(n_hidden: int, dropout: float = 0.1) -> ModelT:\n",
" with Model.define_operators({\">>\": chain, \"|\": concatenate, \"**\": clone}):\n",
" model = (\n",
" # Iterate over each element in the batch\n",
" with_array(\n",
" # Embed the vocab indices\n",
" Embed(n_hidden, len(vocab), column=0)\n",
" # Activate each batch of embedding sequences separately first\n",
" >> Mish(n_hidden, dropout=dropout)\n",
" )\n",
" # Convert to ragged so we can use the reduction layers\n",
" >> list2ragged()\n",
" # Sum the features for each batch input\n",
" >> reduce_sum()\n",
" # Process with a small resnet\n",
" >> residual(Mish(n_hidden, normalize=True)) ** 4\n",
" # Convert (batch_size, n_hidden) to (batch_size, 1)\n",
" >> Mish(1)\n",
" )\n",
" return model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Try It\n",
"\n",
"Let's pass an example through the model to make sure we have all the sizes right."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"text, X, Y = to_example(\"14x + 2y - 3x + 7x\")\n",
"m = build_model(12)\n",
"m.initialize([X], m.ops.asarray(Y, dtype=\"f\"))\n",
"mY = m.predict([X])\n",
"print(mY.shape)\n",
"assert mY.shape == (1, 1)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Generate Training Datasets\n",
"\n",
"Now that we can generate examples and we have a model that can process them, let's generate random unique training and evaluation datasets.\n",
"\n",
"For this we'll write another helper function that can generate (n) training examples and respects an exclude list to avoid letting examples from the training/test sets overlap."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import Tuple, Optional, Set, List\n",
"\n",
"DatasetTuple = Tuple[List[str], List[ModelX], List[ModelY]]\n",
"\n",
"def generate_dataset(\n",
" size: int,\n",
" exclude: Optional[Set[str]] = None,\n",
") -> DatasetTuple:\n",
" ops: Ops = get_current_ops()\n",
" texts: List[str] = generate_problems(size, exclude=exclude)\n",
" examples: List[ModelX] = []\n",
" labels: List[ModelY] = []\n",
" for i, text in enumerate(texts):\n",
" text, x, y = to_example(text)\n",
" examples.append(x)\n",
" labels.append(y)\n",
"\n",
" return texts, examples, labels"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Try It\n",
"\n",
"Generate a small dataset to be sure everything is working as expected"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"texts, x, y = generate_dataset(10)\n",
"assert len(texts) == 10\n",
"assert len(x) == 10\n",
"assert len(y) == 10"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Evaluate Model Performance\n",
"\n",
"We're almost ready to train our model, we just need to write a function that will check a given trained model against a given dataset and return a 0-1 score of how accurate it was.\n",
"\n",
"We'll use this function to print the score as training progresses and print final test predictions at the end of training."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import List\n",
"from wasabi import msg\n",
"\n",
"def evaluate_model(\n",
" model: ModelT,\n",
" *,\n",
" print_problems: bool = False,\n",
" texts: List[str],\n",
" X: List[ModelX],\n",
" Y: List[ModelY],\n",
"):\n",
" Yeval = model.predict(X)\n",
" correct_count = 0\n",
" print_n = 12\n",
" if print_problems:\n",
" msg.divider(f\"eval samples max({print_n})\")\n",
" for text, y_answer, y_guess in zip(texts, Y, Yeval):\n",
" y_guess = round(float(y_guess))\n",
" correct = y_guess == int(y_answer)\n",
" print_fn = msg.fail\n",
" if correct:\n",
" correct_count += 1\n",
" print_fn = msg.good\n",
" if print_problems and print_n > 0:\n",
" print_n -= 1\n",
" print_fn(f\"Answer[{int(y_answer[0])}] Guess[{y_guess}] Text: {text}\")\n",
" if print_problems:\n",
" print(f\"Model predicted {correct_count} out of {len(X)} correctly.\")\n",
" return correct_count / len(X)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Try It\n",
"\n",
"Let's try it out with an untrained model and expect to see a really sad score."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"texts, X, Y = generate_dataset(128)\n",
"m = build_model(12)\n",
"m.initialize(X, m.ops.asarray(Y, dtype=\"f\"))\n",
"# Assume the model should do so poorly as to round down to 0\n",
"assert round(evaluate_model(m, texts=texts, X=X, Y=Y)) == 0"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Train/Evaluate a Model\n",
"\n",
"The final helper function we need is one to train and evaluate a model given two input datasets. \n",
"\n",
"This function does a few things:\n",
"\n",
" 1. Create an Adam optimizer we can use for minimizing the model's prediction error.\n",
" 2. Loop over the given training dataset (epoch) number of times.\n",
" 3. For each epoch, make batches of (batch_size) examples. For each batch(X), predict the number of like terms (Yh) and subtract the known answers (Y) to get the prediction error. Update the model using the optimizer with the calculated error.\n",
" 5. After each epoch, check the model performance against the evaluation dataset.\n",
" 6. Save the model weights for the best score out of all the training epochs.\n",
" 7. After all training is done, restore the best model and print results from the evaluation set."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from thinc.api import Adam\n",
"from wasabi import msg\n",
"import numpy\n",
"from tqdm.auto import tqdm\n",
"\n",
"def train_and_evaluate(\n",
" model: ModelT,\n",
" train_tuple: DatasetTuple,\n",
" eval_tuple: DatasetTuple,\n",
" *,\n",
" lr: float = 3e-3,\n",
" batch_size: int = 64,\n",
" epochs: int = 48,\n",
") -> float:\n",
" (train_texts, train_X, train_y) = train_tuple\n",
" (eval_texts, eval_X, eval_y) = eval_tuple\n",
" msg.divider(\"Train and Evaluate Model\")\n",
" msg.info(f\"Batch size = {batch_size}\\tEpochs = {epochs}\\tLearning Rate = {lr}\")\n",
"\n",
" optimizer = Adam(lr)\n",
" best_score: float = 0.0\n",
" best_model: Optional[bytes] = None\n",
" for n in range(epochs):\n",
" loss = 0.0\n",
" batches = model.ops.multibatch(batch_size, train_X, train_y, shuffle=True)\n",
" for X, Y in tqdm(batches, leave=False, unit=\"batches\"):\n",
" Y = model.ops.asarray(Y, dtype=\"float32\")\n",
" Yh, backprop = model.begin_update(X)\n",
" err = Yh - Y\n",
" backprop(err)\n",
" loss += (err ** 2).sum()\n",
" model.finish_update(optimizer)\n",
" score = evaluate_model(model, texts=eval_texts, X=eval_X, Y=eval_y)\n",
" if score > best_score:\n",
" best_model = model.to_bytes()\n",
" best_score = score\n",
" print(f\"{n}\\t{score:.2f}\\t{loss:.2f}\")\n",
"\n",
" if best_model is not None:\n",
" model.from_bytes(best_model)\n",
" print(f\"Evaluating with best model\")\n",
" score = evaluate_model(\n",
" model, texts=eval_texts, print_problems=True, X=eval_X, Y=eval_y\n",
" )\n",
" print(f\"Final Score: {score}\")\n",
" return score\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We'll generate the dataset first, so we can iterate on the model without having to spend time generating examples for each run. This also ensures we have the same dataset across different model runs, to make it easier to compare performance."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_size = 1024 * 8\n",
"test_size = 2048\n",
"seen_texts: Set[str] = set()\n",
"with msg.loading(f\"Generating train dataset with {train_size} examples...\"):\n",
" train_dataset = generate_dataset(train_size, seen_texts)\n",
"msg.good(f\"Train set created with {train_size} examples.\")\n",
"with msg.loading(f\"Generating eval dataset with {test_size} examples...\"):\n",
" eval_dataset = generate_dataset(test_size, seen_texts)\n",
"msg.good(f\"Eval set created with {test_size} examples.\")\n",
"init_x = train_dataset[1][:2]\n",
"init_y = train_dataset[2][:2]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, we can build, train, and evaluate our model!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = build_model(64)\n",
"model.initialize(init_x, init_y)\n",
"train_and_evaluate(\n",
" model, train_dataset, eval_dataset, lr=2e-3, batch_size=64, epochs=16\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Intermediate Exercise\n",
"\n",
"The model we built can train up to ~80% given 100 or more epochs. Improve the model architecture so that it trains to a similar accuracy while requiring fewer epochs or a smaller dataset size."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import List\n",
"from thinc.model import Model\n",
"from thinc.types import Array2d, Array1d\n",
"from thinc.api import chain, clone, list2ragged, reduce_mean, Mish, with_array, Embed, residual\n",
"\n",
"def custom_model(n_hidden: int, dropout: float = 0.1) -> Model[List[Array2d], Array2d]:\n",
" # Put your custom architecture here\n",
" return build_model(n_hidden, dropout)\n",
"\n",
"model = custom_model(64)\n",
"model.initialize(init_x, init_y)\n",
"train_and_evaluate(\n",
" model, train_dataset, eval_dataset, lr=2e-3, batch_size=64, epochs=16\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Advanced Exercise\n",
"\n",
"Rewrite the model to encode the whole expression with a BiLSTM, and then generate pairs of terms, using the BiLSTM vectors. Over each pair of terms, predict whether the terms are alike or unlike."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from dataclasses import dataclass\n",
"from thinc.types import Array2d, Ragged\n",
"from thinc.model import Model\n",
"\n",
"\n",
"@dataclass\n",
"class Comparisons:\n",
" data: Array2d # Batch of vectors for each pair\n",
" indices: Array2d # Int array of shape (N, 3), showing the (batch, term1, term2) positions\n",
"\n",
"def pairify() -> Model[Ragged, Comparisons]:\n",
" \"\"\"Create pair-wise comparisons for items in a sequence. For each sequence of N\n",
" items, there will be (N**2-N)/2 comparisons.\"\"\"\n",
" ...\n",
"\n",
"def predict_over_pairs(model: Model[Array2d, Array2d]) -> Model[Comparisons, Comparisons]:\n",
" \"\"\"Apply a prediction model over a batch of comparisons. Outputs a Comparisons\n",
" object where the data is the scores. The prediction model should predict over\n",
" two classes, True and False.\"\"\"\n",
" ...\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
thinc-release-v9.1.1/examples/benchmarks/ 0000775 0000000 0000000 00000000000 14670643317 0020401 5 ustar 00root root 0000000 0000000 thinc-release-v9.1.1/examples/benchmarks/lstm_tagger.py 0000664 0000000 0000000 00000010512 14670643317 0023262 0 ustar 00root root 0000000 0000000 """
Compare tagging speed for LSTM, using dummy data.
Results on CPU laptop:
PyTorchLSTM.v1:
Predicted 39017 4.033804399892688 Ys[0] 0.05000001 5.551115e-17
LSTM (NumpyOps):
Predicted 39018 13.174870599992573 Ys[0] 0.05000001 5.551115e-17
So PyTorch is 3x faster currently.
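Usage, e.g. (typer builds the CLI flags from main()'s signature):
    python lstm_tagger.py --pytorch --gpu-id 0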
"""
from typing import List
import typer
import tqdm
import numpy.random
from timeit import default_timer as timer
from thinc.api import Model, Config, registry
from thinc.api import chain, list2padded, with_array, with_padded
from thinc.api import to_categorical, set_current_ops
from thinc.api import Ops, NumpyOps, CupyOps, fix_random_seed, require_gpu
from thinc.types import Ints1d, Ints2d, Floats2d, Padded
CONFIG = """
[data]
n_samples = 1000
n_tags = 20
n_vocab = 10000
length_mean = 50
length_variance = 5
[common]
width = 300
[model]
@layers = "LSTMTagger.v1"
[model.embed]
@layers = "Embed.v1"
nO = ${common:width}
nV = ${data:n_vocab}
column = 0
[model.encode]
@layers = "LSTM.v1"
nO = ${common:width}
nI = ${common:width}
depth = 2
bi = true
[model.predict]
@layers = "Linear.v1"
nO = ${data:n_tags}
"""
@registry.layers("LSTMTagger.v1")
def build_tagger(
embed: Model[Ints2d, Floats2d],
encode: Model[Padded, Padded],
predict: Model[Floats2d, Floats2d],
) -> Model[List[Ints1d], Padded]:
model = chain(
with_array(embed), with_padded(encode), with_array(predict), list2padded()
)
model.set_ref("embed", embed)
model.set_ref("encode", encode)
model.set_ref("predict", model.layers[-1])
return model
def get_dummy_data(n_samples, n_tags, n_vocab, length_mean, length_variance):
Xs = []
Ys = []
for _ in range(n_samples):
        length = numpy.random.normal(scale=length_variance) + length_mean
shape = (max(1, int(length)),)
X = numpy.random.uniform(0, n_vocab - 1, shape)
Y = numpy.random.uniform(0, n_tags - 1, shape)
assert X.size, length
assert Y.size, length
Xs.append(X.reshape((-1, 1)).astype("i"))
Ys.append(to_categorical(Y.astype("i")))
return Xs, Ys
def run_forward(model, Xs, n_times=1):
total = 0.0
for _ in range(n_times):
for batch in tqdm.tqdm(Xs):
Y = model.predict(batch)
total += Y.data.sum()
return float(total)
def run_forward_backward(model, batches, n_times=1):
total = 0.0
for _ in range(n_times):
for X, Y in tqdm.tqdm(batches):
Yh, get_dX = model.begin_update(X)
get_dX(Yh)
total += Yh.data.sum()
return float(total)
def set_backend(name, gpu_id):
global CONFIG
if name == "generic":
set_current_ops(Ops())
else:
if gpu_id == -1:
set_current_ops(NumpyOps(use_blis=True))
else:
set_current_ops(CupyOps())
if name == "pytorch":
import torch
torch.set_num_threads(1)
CONFIG = CONFIG.replace("LSTM.v1", "PyTorchLSTM.v1")
def main(
numpy: bool = False, pytorch: bool = False, generic: bool = False, gpu_id: int = -1
):
global CONFIG
fix_random_seed(0)
if gpu_id >= 0:
require_gpu(gpu_id)
print("Set GPU", gpu_id)
backends = {"pytorch": pytorch, "numpy": numpy, "generic": generic}
for name, use_backend in backends.items():
if not use_backend:
print(f"Skipping {name}")
continue
set_backend(name, gpu_id)
print("Getting data")
C = registry.resolve(Config().from_str(CONFIG))
model = C["model"]
X, Y = get_dummy_data(**C["data"])
print("Copy to device")
X = [model.ops.asarray(x) for x in X]
Y = [model.ops.asarray(y) for y in Y]
print("Begin init", len(X))
model.initialize(X=X[:5])
print("Pre-batch")
n_words = sum(len(x) for x in X)
batches = model.ops.multibatch(16, X, Y)
batches = [(model.layers[0].predict(x), y) for x, y in batches]
model.layers.pop(0)
print("Start")
start_time = timer()
total = run_forward(model, [x for x, y in batches])
end_time = timer()
print(name, n_words, total, end_time - start_time)
start_time = timer()
total = run_forward_backward(model, batches)
end_time = timer()
print(name, n_words, total, end_time - start_time)
if __name__ == "__main__":
typer.run(main)
thinc-release-v9.1.1/examples/benchmarks/mappers.py 0000664 0000000 0000000 00000007622 14670643317 0022431 0 ustar 00root root 0000000 0000000 from thinc.api import remap_ids_v2
from thinc.api import premap_ids
from thinc.api import chain, Embed, HashEmbed
import time
import random
import numpy as np
import cupy as cp
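# This script benchmarks remap_ids_v2 against premap_ids. premap_ids is the
# compiled Cython variant (see thinc.layers.premap_ids in setup.py's MOD_NAMES),
# so we'd expect it to beat the pure-Python remapping; the "speedup" lines
# below print the measured ratio for each scenario.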
N_symbols = 200000
N_tokens = 50000
N_batch = 500
N_columns = 4
N_dim = 300
mapper = {}
numbers = list(range(N_symbols))
random.shuffle(numbers)
for v, k in enumerate(numbers):
mapper[k] = v
class time_context:
"""Register the running time of a context."""
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, type, value, traceback):
self.elapsed = time.perf_counter() - self.start
def speed_test_no_column():
remap = remap_ids_v2(mapper)
premap = premap_ids(mapper)
keys = np.random.randint(0, N_symbols, N_tokens)
with time_context() as elapsed:
for i in range(100):
remap(keys, False)
remaptime = elapsed.elapsed
with time_context() as elapsed:
for i in range(100):
premap(keys, False)
premaptime = elapsed.elapsed
print("remap", remaptime)
print("premap", premaptime)
print("speedup", remaptime / premaptime)
def speed_test_column():
remap = remap_ids_v2(mapper, column=3)
premap = premap_ids(mapper, column=3)
keys = np.random.randint(0, N_symbols, (N_tokens, N_columns))
with time_context() as elapsed:
for i in range(100):
remap(keys, False)
remaptime = elapsed.elapsed
with time_context() as elapsed:
for i in range(100):
premap(keys, False)
premaptime = elapsed.elapsed
print("remap", remaptime)
print("premap", premaptime)
print("speedup", remaptime / premaptime)
def speed_test_cupy():
remap = remap_ids_v2(mapper)
premap = premap_ids(mapper)
keys = cp.random.randint(0, N_symbols, N_tokens)
with time_context() as elapsed:
for i in range(100):
remap(keys, False)
remaptime = elapsed.elapsed
with time_context() as elapsed:
for i in range(100):
premap(keys, False)
premaptime = elapsed.elapsed
print("remap", remaptime)
print("premap", premaptime)
print("speedup", remaptime / premaptime)
def speed_test_with_embed():
remap = chain(remap_ids_v2(mapper), Embed(N_dim, N_symbols))
premap = chain(premap_ids(mapper), Embed(N_dim, N_symbols))
remap.initialize()
premap.initialize()
keys = np.random.randint(0, N_symbols, N_tokens)
with time_context() as elapsed:
for i in range(100):
remap(keys, False)
remaptime = elapsed.elapsed
with time_context() as elapsed:
for i in range(100):
premap(keys, False)
premaptime = elapsed.elapsed
print("remap", remaptime)
print("premap", premaptime)
print("speedup", remaptime / premaptime)
def speed_test_cupy_with_embed():
remap = chain(remap_ids_v2(mapper), Embed(N_dim, N_symbols))
premap = chain(premap_ids(mapper), Embed(N_dim, N_symbols))
remap.initialize()
premap.initialize()
keys = cp.random.randint(0, N_symbols, N_tokens)
with time_context() as elapsed:
for i in range(100):
remap(keys, False)
remaptime = elapsed.elapsed
with time_context() as elapsed:
for i in range(100):
premap(keys, False)
premaptime = elapsed.elapsed
print("remap", remaptime)
print("premap", premaptime)
print("speedup", remaptime / premaptime)
def speed_test_hashembed():
embed = HashEmbed(N_dim, N_symbols)
embed.initialize()
keys = np.random.randint(0, N_symbols, N_tokens)
with time_context() as elapsed:
for i in range(100):
embed(keys, False)
print(elapsed.elapsed)
print("No columns")
speed_test_no_column()
print("Columns")
speed_test_column()
print("Cupy")
speed_test_cupy()
print("With Embed")
speed_test_with_embed()
print("Cupy With Embed")
speed_test_cupy_with_embed()
print("HashEmbed speed")
speed_test_hashembed()
thinc-release-v9.1.1/examples/bloom_embeddings.ipynb 0000664 0000000 0000000 00000267034 14670643317 0022634 0 ustar 00root root 0000000 0000000 {
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"executionInfo": {
"elapsed": 3527,
"status": "ok",
"timestamp": 1648197799884,
"user": {
"displayName": "Vincent D. Warmerdam",
"photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh4KYzhhhK0YDTnAQsUIaQPw-0dKIP-kLBID7nFdQ=s64",
"userId": "05641618555626735638"
},
"user_tz": -60
},
"id": "ssfd1qsSxtRS",
"outputId": "7c6e6585-2362-4be2-da05-db00a0307fe6",
"tags": []
},
"outputs": [],
"source": [
"%pip install mmh3 numpy"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## The Bloom embeddings algorithm\n",
"\n",
"In a normal embedding table, each word-string is mapped to a distinct ID.\n",
"Usually these IDs will be sequential, so if you have a vocabulary of 100 words,\n",
"your words will be mapped to numbers `range(100)`. The sequential IDs can then\n",
"be used as indices into an embedding table: if you have 100 words in your\n",
"vocabulary, you have 100 rows in the table, and each word receives its own\n",
"vector.\n",
"\n",
"However, there's no limit to the number of unique words that might occur in a\n",
"sample of text, while we definitely want a limited number of rows in our\n",
"embedding table. Some of the rows in our table will therefore need to be shared\n",
"between multiple words in our vocabulary. One obvious solution is to set aside a\n",
"single vector in the table. Words 0-98 will each receive their own vector, while\n",
"all other words are assigned to vector 99.\n",
"\n",
"However, this asks vector 99 to do a lot of work. What if we gave more vectors\n",
"to the unknown words?"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"executionInfo": {
"elapsed": 10,
"status": "ok",
"timestamp": 1648197799885,
"user": {
"displayName": "Vincent D. Warmerdam",
"photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh4KYzhhhK0YDTnAQsUIaQPw-0dKIP-kLBID7nFdQ=s64",
"userId": "05641618555626735638"
},
"user_tz": -60
},
"id": "Eb895XpR-VUB"
},
"outputs": [],
"source": [
"def get_row(word_id, number_vector=100, number_oov=10):\n",
" if word_id < (number_vector - number_oov):\n",
" return word_id\n",
" else:\n",
" return number_vector + (word_id % number_oov)"
]
},
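  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a quick sanity check of `get_row` (a minimal sketch, using the default\n",
    "arguments): in-vocabulary IDs keep their own row, while out-of-vocabulary IDs\n",
    "share the ten extra buckets:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "assert get_row(42) == 42  # in-vocabulary: its own row\n",
    "assert get_row(1234) == 100 + (1234 % 10)  # OOV: one of the 10 shared buckets"
   ]
  },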
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This gives the model a little more resolution for the unknown words. If all\n",
"out-of-vocabulary words are assigned the same vector, then they'll all look\n",
"identical to the model. Even if the training data actually includes information\n",
"that shows two different out-of-vocabulary words have important, different\n",
"implications -- for instance, if one word is a strong indicator of positive\n",
"sentiment, while the other is a strong indicator of negative sentiment -- the\n",
"model won't be able to tell them apart. However, if we have 10 buckets for the\n",
"unknown words, we might get lucky, and assign these words to different buckets.\n",
"If so, the model would be able to learn that one of the unknown-word vectors\n",
"makes positive sentiment more likely, while the other vector makes negative\n",
"sentiment more likely.\n",
"\n",
"If this is good, then why not do more of it? Bloom embeddings are like an\n",
"extreme version, where _every_ word is handled like the unknown words above:\n",
"there are 100 vectors for the \"unknown\" portion, and 0 for the \"known\" portion.\n",
"\n",
"So far, this approach seems weird, but not necessarily good. The part that makes\n",
"it unfairly effective is the next step: by simply doing the same thing multiple\n",
"times, we can greatly improve the resolution, and have unique representations\n",
"for far more words than we have vectors. The code in full:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"executionInfo": {
"elapsed": 8,
"status": "ok",
"timestamp": 1648197799885,
"user": {
"displayName": "Vincent D. Warmerdam",
"photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh4KYzhhhK0YDTnAQsUIaQPw-0dKIP-kLBID7nFdQ=s64",
"userId": "05641618555626735638"
},
"user_tz": -60
},
"id": "tTkqM3EixhWM"
},
"outputs": [],
"source": [
"import numpy\n",
"import mmh3\n",
"\n",
"def allocate(n_vectors, n_dimensions):\n",
" table = numpy.zeros((n_vectors, n_dimensions), dtype='f')\n",
" table += numpy.random.uniform(-0.1, 0.1, table.size).reshape(table.shape)\n",
" return table\n",
"\n",
"def get_vector(table, word):\n",
" hash1 = mmh3.hash(word, seed=0)\n",
" hash2 = mmh3.hash(word, seed=1)\n",
" row1 = hash1 % table.shape[0]\n",
" row2 = hash2 % table.shape[0]\n",
" return table[row1] + table[row2]\n",
"\n",
"def update_vector(table, word, d_vector):\n",
" hash1 = mmh3.hash(word, seed=0)\n",
" hash2 = mmh3.hash(word, seed=1)\n",
" row1 = hash1 % table.shape[0]\n",
" row2 = hash2 % table.shape[0]\n",
" table[row1] -= 0.001 * d_vector\n",
" table[row2] -= 0.001 * d_vector"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In this example, we've used two keys, assigned from two random hash functions.\n",
"It's unlikely that two words will collide on both keys, so by simply summing the\n",
"vectors together, we'll assign most words a unique representation.\n",
"\n",
"For the sake of illustration, let's step through a very small example,\n",
"explicitly.\n",
"\n",
"Let's say we have this vocabulary of 20 words:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"executionInfo": {
"elapsed": 8,
"status": "ok",
"timestamp": 1648197799885,
"user": {
"displayName": "Vincent D. Warmerdam",
"photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh4KYzhhhK0YDTnAQsUIaQPw-0dKIP-kLBID7nFdQ=s64",
"userId": "05641618555626735638"
},
"user_tz": -60
},
"id": "QMaz-mr9xjPG"
},
"outputs": [],
"source": [
"vocab = ['apple', 'strawberry', 'orange', 'juice', 'drink', 'smoothie',\n",
" 'eat', 'fruit', 'health', 'wellness', 'steak', 'fries', 'ketchup',\n",
" 'burger', 'chips', 'lobster', 'caviar', 'service', 'waiter', 'chef']"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We'll embed these into two dimensions. Normally this would give us a table of\n",
"`(20, 2)` floats, which we would randomly initialise. With the hashing trick, we\n",
"can make the table smaller. Let's give it 15 vectors:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"executionInfo": {
"elapsed": 8,
"status": "ok",
"timestamp": 1648197799886,
"user": {
"displayName": "Vincent D. Warmerdam",
"photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh4KYzhhhK0YDTnAQsUIaQPw-0dKIP-kLBID7nFdQ=s64",
"userId": "05641618555626735638"
},
"user_tz": -60
},
"id": "LNg60lvqxkmP"
},
"outputs": [],
"source": [
"normal_embed = numpy.random.uniform(-0.1, 0.1, (20, 2))\n",
"hashed_embed = numpy.random.uniform(-0.1, 0.1, (15, 2))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In the normal table, we want to map each word in our vocabulary to its own\n",
"vector:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"executionInfo": {
"elapsed": 5,
"status": "ok",
"timestamp": 1648197801914,
"user": {
"displayName": "Vincent D. Warmerdam",
"photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh4KYzhhhK0YDTnAQsUIaQPw-0dKIP-kLBID7nFdQ=s64",
"userId": "05641618555626735638"
},
"user_tz": -60
},
"id": "9wFC_iR_xlBH"
},
"outputs": [],
"source": [
"word2id = {}\n",
"def get_normal_vector(word, table):\n",
" if word not in word2id.keys():\n",
" word2id[word] = len(word2id)\n",
" return table[word2id[word]]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The hashed table only has 15 rows, so some words will have to share. We'll\n",
"handle this by mapping the word into an arbitrary integer – called a \"hash\n",
"value\". The hash function will return an arbitrary integer, which we'll mod into\n",
"the range `(0, 15)`. Importantly, we need to be able to compute _multiple,\n",
"distinct_ hash values for each key – so Python's built-in hash function is\n",
"inconvenient. We'll therefore use MurmurHash.\n",
"\n",
"Let's see what keys we get for our 20 vocabulary items, using MurmurHash:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"executionInfo": {
"elapsed": 4,
"status": "ok",
"timestamp": 1648197804508,
"user": {
"displayName": "Vincent D. Warmerdam",
"photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh4KYzhhhK0YDTnAQsUIaQPw-0dKIP-kLBID7nFdQ=s64",
"userId": "05641618555626735638"
},
"user_tz": -60
},
"id": "Gs69d2KRxmg9"
},
"outputs": [],
"source": [
"hashes1 = [mmh3.hash(w, 1) % 15 for w in vocab]\n",
"assert hashes1 == [3, 6, 4, 13, 8, 3, 13, 1, 9, 12, 11, 4, 2, 13, 5, 10, 0, 2, 10, 13]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As you can see, some keys are shared between multiple words, while 2/15 keys are\n",
"unoccupied. This is obviously unideal! If multiple words have the same key,\n",
"they'll map to the same vector – as far as the model is concerned, \"strawberry\"\n",
"and \"heart\" will be indistinguishable. It won't be clear which word was used –\n",
"they have the same representation.\n",
"\n",
"To address this, we simply hash the words again, this time using a different\n",
"seed – so that we get a different set of arbitrary keys:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"executionInfo": {
"elapsed": 3,
"status": "ok",
"timestamp": 1648197804508,
"user": {
"displayName": "Vincent D. Warmerdam",
"photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh4KYzhhhK0YDTnAQsUIaQPw-0dKIP-kLBID7nFdQ=s64",
"userId": "05641618555626735638"
},
"user_tz": -60
},
"id": "acpOxkljynPo"
},
"outputs": [],
"source": [
"from collections import Counter\n",
"\n",
"hashes2 = [mmh3.hash(w, 2) % 15 for w in vocab]\n",
"assert len(Counter(hashes2).most_common()) == 12"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This one's even worse – 3 keys unoccupied! But our strategy is not to keep drawing until we get a favorable seed. Instead, consider this:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"executionInfo": {
"elapsed": 3,
"status": "ok",
"timestamp": 1648197805024,
"user": {
"displayName": "Vincent D. Warmerdam",
"photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh4KYzhhhK0YDTnAQsUIaQPw-0dKIP-kLBID7nFdQ=s64",
"userId": "05641618555626735638"
},
"user_tz": -60
},
"id": "W7tfxLQBytWP"
},
"outputs": [],
"source": [
"assert len(Counter(zip(hashes1, hashes2))) == 20"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"By combining the results from the two hashes, our 20 words distribute perfectly,\n",
"into 20 unique combinations. This makes sense: we expect to have some words\n",
"overlapping on one of the keys, but we'd have to be very unlucky for a pair of\n",
"words to overlap on _both_ keys.\n",
"\n",
"This means that if we simply add the two vectors together, each word once more\n",
"has a unique representation:"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"executionInfo": {
"elapsed": 2,
"status": "ok",
"timestamp": 1648197805764,
"user": {
"displayName": "Vincent D. Warmerdam",
"photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh4KYzhhhK0YDTnAQsUIaQPw-0dKIP-kLBID7nFdQ=s64",
"userId": "05641618555626735638"
},
"user_tz": -60
},
"id": "wI5yayZWyxVP",
"outputId": "4f62b77d-709f-483b-a0bb-4e5fbe68d5fe"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"apple -0.033 -0.012\n",
"strawberry -0.023 -0.037\n",
"orange 0.158 -0.031\n",
"juice -0.045 0.139\n",
"drink 0.024 0.030\n",
"smoothie 0.121 0.076\n",
"eat -0.093 0.153\n",
"fruit 0.083 0.052\n",
"health 0.064 -0.046\n",
"wellness 0.143 0.112\n",
"steak 0.011 -0.097\n",
"fries 0.036 0.041\n",
"ketchup 0.081 0.029\n",
"burger -0.045 0.139\n",
"chips -0.118 -0.090\n",
"lobster 0.016 -0.107\n",
"caviar -0.033 -0.012\n",
"service 0.081 0.029\n",
"waiter 0.179 -0.038\n",
"chef -0.047 0.062\n"
]
}
],
"source": [
"for word in vocab:\n",
" key1 = mmh3.hash(word, 0) % 15\n",
" key2 = mmh3.hash(word, 1) % 15\n",
" vector = hashed_embed[key1] + hashed_embed[key2]\n",
" print(word, '%.3f %.3f' % tuple(vector))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We now have a function that maps our 20 words to 20 unique vectors – but we're\n",
"storing weights for only 15 vectors in memory. Now the question is: will we be\n",
"able to find values for these weights that let us actually map words to useful\n",
"vectors?\n",
"\n",
"Let's do a quick experiment to see how this works. We'll assign \"true\" values\n",
"for our little vocabulary, and see how well we can approximate them with our\n",
"compressed table. To get the \"true\" values, we _could_ put the \"science\" in data\n",
"science, and drag the words around into reasonable-looking clusters. But for our\n",
"purposes, the actual \"true\" values don't matter. We'll therefore just do a\n",
"simulation: we'll assign random vectors as the \"true\" state, and see if we can\n",
"learn values for the hash embeddings that match them.\n",
"\n",
"The learning procedure will be a simple stochastic gradient descent:"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"colab": {
"background_save": true
},
"executionInfo": {
"elapsed": 3,
"status": "aborted",
"timestamp": 1648199186370,
"user": {
"displayName": "Vincent D. Warmerdam",
"photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh4KYzhhhK0YDTnAQsUIaQPw-0dKIP-kLBID7nFdQ=s64",
"userId": "05641618555626735638"
},
"user_tz": -60
},
"id": "ET4n9AA5y0fX"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"499 43.47128495286\n"
]
}
],
"source": [
"import numpy\n",
"import numpy.random as random\n",
"import mmh3\n",
"\n",
"random.seed(0)\n",
"nb_epoch = 500\n",
"learn_rate = 0.001\n",
"nr_hash_vector = 1000\n",
"\n",
"words = [str(i) for i in range(2000)]\n",
"true_vectors = numpy.random.uniform(-0.1, 0.1, (len(words), 10))\n",
"hash_vectors = numpy.random.uniform(-0.1, 0.1, (nr_hash_vector, 10))\n",
"examples = list(zip(words, true_vectors))\n",
"\n",
"for epoch in range(nb_epoch):\n",
" random.shuffle(examples)\n",
" loss=0.\n",
" for word, truth in examples:\n",
" key1 = mmh3.hash(word, 0) % nr_hash_vector\n",
" key2 = mmh3.hash(word, 1) % nr_hash_vector\n",
" hash_vector = hash_vectors[key1] + hash_vectors[key2]\n",
" diff = hash_vector - truth\n",
" hash_vectors[key1] -= learn_rate * diff\n",
" hash_vectors[key2] -= learn_rate * diff\n",
" loss += (diff**2).sum()\n",
"print(epoch, loss)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"It's worth taking some time to play with this simulation. You can start by doing\n",
"some sanity checks:\n",
"\n",
"- How does the loss change with `nr_hash_vector`?\n",
"- If you remove `key2`, does the loss go up?\n",
"- What happens if you add more hash keys?\n",
"- What happens as the vocabulary size increases?\n",
"- What happens when more dimensions are added?\n",
"- How sensitive are the hash embeddings to the initial conditions? If we change the random seed, do we ever get unlucky?\n",
"\n",
"If you play with the simulation for a while, you'll start to get a good feel for\n",
"the dynamics, and hopefully you'll have a clear idea of why the technique works."
]
},
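  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For example, here's one way to check the second question. This is a minimal\n",
    "sketch that reuses the setup from the simulation above, but trains with a\n",
    "single hash key per word:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Same simulation as above, but with only one hash key per word. Reuses\n",
    "# words, examples, nb_epoch, learn_rate and nr_hash_vector from the cell above.\n",
    "random.seed(0)\n",
    "single_vectors = numpy.random.uniform(-0.1, 0.1, (nr_hash_vector, 10))\n",
    "\n",
    "for epoch in range(nb_epoch):\n",
    "    random.shuffle(examples)\n",
    "    loss = 0.0\n",
    "    for word, truth in examples:\n",
    "        key1 = mmh3.hash(word, 0) % nr_hash_vector\n",
    "        diff = single_vectors[key1] - truth\n",
    "        single_vectors[key1] -= learn_rate * diff\n",
    "        loss += (diff ** 2).sum()\n",
    "print(epoch, loss)  # expect a noticeably higher final loss than with two keys"
   ]
  },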
{
"cell_type": "markdown",
"metadata": {
"id": "TuRoY34yQb0v",
"tags": []
},
"source": [
"## Bonus Section \n",
"\n",
"To make it easier for folks to try out a whole bunch of settings we'd added a little bit of code below that makes it easier to get relevant visuals."
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"executionInfo": {
"elapsed": 2919,
"status": "ok",
"timestamp": 1648200042349,
"user": {
"displayName": "Vincent D. Warmerdam",
"photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh4KYzhhhK0YDTnAQsUIaQPw-0dKIP-kLBID7nFdQ=s64",
"userId": "05641618555626735638"
},
"user_tz": -60
},
"id": "NPVKX_pbXJYs",
"outputId": "fc046666-d690-426d-b8a7-dc557f12832d",
"tags": []
},
"outputs": [],
"source": [
"%pip install altair pandas"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {
"colab": {
"background_save": true
},
"id": "nHd1wo6m1q-J"
},
"outputs": [],
"source": [
"from functools import reduce \n",
"\n",
"\n",
"def calc_losses(epochs=500, seed=0, learn_rate=0.001, nr_hash_vector=1000, n_hash=3, n_words=1000, size_vector=10):\n",
" random.seed(seed)\n",
" nb_epoch = epochs\n",
" learn_rate = learn_rate\n",
" nr_hash_vector = nr_hash_vector\n",
"\n",
" words = [str(i) for i in range(n_words)]\n",
" true_vectors = numpy.random.uniform(-0.1, 0.1, (len(words), size_vector))\n",
" hash_vectors = numpy.random.uniform(-0.1, 0.1, (nr_hash_vector, size_vector))\n",
" examples = list(zip(words, true_vectors))\n",
"\n",
" losses = []\n",
" for epoch in range(nb_epoch):\n",
" random.shuffle(examples)\n",
" loss=0.\n",
" for word, truth in examples:\n",
" keys = [mmh3.hash(word, k) % nr_hash_vector for k in range(n_hash)]\n",
" hash_vector = reduce(lambda a, b: a + b, [hash_vectors[k] for k in keys])\n",
" diff = hash_vector - truth\n",
" for key in keys:\n",
" hash_vectors[key] -= learn_rate * diff\n",
" loss += (diff**2).sum()\n",
" losses.append(loss)\n",
" return losses\n",
"\n",
"data = []\n",
"for n_hash in [1, 2, 3, 4, 5]:\n",
" losses = calc_losses(nr_hash_vector=2_000, n_words=10_000, n_hash=n_hash, epochs=150)\n",
" data = data + [{\"loss\": l, \"nr_hash_vector\": nr_hash_vector, \"n_hash\": str(n_hash), \"epoch\": e} for e, l in enumerate(losses)]"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {
"colab": {
"background_save": true
},
"id": "P0Q0k9bjXMm3"
},
"outputs": [
{
"data": {
"text/html": [
"\n",
"\n",
""
],
"text/plain": [
"alt.Chart(...)"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import pandas as pd\n",
"import altair as alt\n",
"\n",
"source = pd.DataFrame(data)\n",
"\n",
"(alt.Chart(source)\n",
" .mark_line()\n",
" .encode(x='epoch', y='loss', color='n_hash')\n",
" .properties(width=600, height=250)\n",
" .interactive())"
]
}
],
"metadata": {
"colab": {
"authorship_tag": "ABX9TyPAXtr/TeMWYmJkxrXcAPIT",
"collapsed_sections": [],
"name": "bloom_embeddings.ipynb",
"version": ""
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
thinc-release-v9.1.1/examples/mnist.py 0000664 0000000 0000000 00000003067 14670643317 0017776 0 ustar 00root root 0000000 0000000 """
PyTorch version: https://github.com/pytorch/examples/blob/master/mnist/main.py
TensorFlow version: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/mnist/mnist.py
"""
# pip install thinc ml_datasets typer
from thinc.api import Model, chain, Relu, Softmax, Adam
import ml_datasets
from wasabi import msg
from tqdm import tqdm
import typer
def main(
n_hidden: int = 256, dropout: float = 0.2, n_iter: int = 10, batch_size: int = 128
):
# Define the model
model: Model = chain(
Relu(nO=n_hidden, dropout=dropout),
Relu(nO=n_hidden, dropout=dropout),
Softmax(),
)
# Load the data
(train_X, train_Y), (dev_X, dev_Y) = ml_datasets.mnist()
# Set any missing shapes for the model.
model.initialize(X=train_X[:5], Y=train_Y[:5])
train_data = model.ops.multibatch(batch_size, train_X, train_Y, shuffle=True)
dev_data = model.ops.multibatch(batch_size, dev_X, dev_Y)
# Create the optimizer.
optimizer = Adam(0.001)
for i in range(n_iter):
for X, Y in tqdm(train_data, leave=False):
Yh, backprop = model.begin_update(X)
backprop(Yh - Y)
model.finish_update(optimizer)
# Evaluate and print progress
correct = 0
total = 0
for X, Y in dev_data:
Yh = model.predict(X)
correct += (Yh.argmax(axis=1) == Y.argmax(axis=1)).sum()
total += Yh.shape[0]
score = correct / total
msg.row((i, f"{score:.3f}"), widths=(3, 5))
if __name__ == "__main__":
typer.run(main)
thinc-release-v9.1.1/examples/transformers_tagger.py 0000664 0000000 0000000 00000017322 14670643317 0022721 0 ustar 00root root 0000000 0000000 """Train a transformer tagging model, using Huggingface's Transformers."""
# pip install thinc ml_datasets typer tqdm transformers torch
from dataclasses import dataclass
from typing import List, Optional, Tuple, Callable
import torch
from pathlib import Path
from transformers import AutoTokenizer, AutoModel
import thinc
from thinc.api import PyTorchWrapper, Softmax, chain, with_array, Model, Config
from thinc.api import torch2xp, xp2torch, SequenceCategoricalCrossentropy
from thinc.api import prefer_gpu, use_pytorch_for_gpu_memory
from thinc.types import Floats2d, ArgsKwargs
import ml_datasets
import tqdm
import typer
CONFIG = """
[model]
@layers = "TransformersTagger.v1"
starter = "bert-base-multilingual-cased"
[optimizer]
@optimizers = "RAdam.v1"
weight_decay = 1e-8
[optimizer.learn_rate]
@schedules = "warmup_linear.v1"
initial_rate = 0.01
warmup_steps = 3000
total_steps = 6000
[training]
batch_size = 128
words_per_subbatch = 2000
n_epoch = 10
"""
def main(path: Optional[Path] = None, out_dir: Optional[Path] = None):
if prefer_gpu():
print("Using gpu!")
use_pytorch_for_gpu_memory()
# You can edit the CONFIG string within the file, or copy it out to
# a separate file and pass in the path.
if path is None:
config = Config().from_str(CONFIG)
else:
config = Config().from_disk(path)
# resolve constructs objects whenever you have blocks with an @ key.
    # In the optimizer block we write @optimizers = "RAdam.v1". This tells Thinc
    # to use registry.optimizers to fetch the "RAdam.v1" function. You can
# register your own functions as well and build up trees of objects.
C = thinc.registry.resolve(config)
words_per_subbatch = C["training"]["words_per_subbatch"]
n_epoch = C["training"]["n_epoch"]
batch_size = C["training"]["batch_size"]
model = C["model"]
optimizer = C["optimizer"]
calculate_loss = SequenceCategoricalCrossentropy()
(train_X, train_Y), (dev_X, dev_Y) = ml_datasets.ud_ancora_pos_tags()
# Convert the outputs to cupy (if we're using that)
train_Y = list(map(model.ops.asarray, train_Y))
dev_Y = list(map(model.ops.asarray, dev_Y))
# Pass in a small batch of data, to fill in missing shapes
model.initialize(X=train_X[:5], Y=train_Y[:5])
for epoch in range(n_epoch):
# Transformers often learn best with large batch sizes -- larger than
# fits in GPU memory. But you don't have to backprop the whole batch
# at once. Here we consider the "logical" batch size (number of examples
# per update) separately from the physical batch size.
batches = model.ops.multibatch(batch_size, train_X, train_Y, shuffle=True)
for outer_batch in tqdm.tqdm(batches, leave=False):
# For the physical batch size, what we care about is the number
# of words (considering padding too). We also want to sort by
# length, for efficiency.
for batch in minibatch_by_words(outer_batch, words_per_subbatch):
inputs, truths = zip(*batch)
guesses, backprop = model(inputs, is_train=True)
backprop(calculate_loss.get_grad(guesses, truths))
# At the end of the batch, we call the optimizer with the accumulated
# gradients, and advance the learning rate schedules.
model.finish_update(optimizer)
optimizer.step_schedules()
# You might want to evaluate more often than once per epoch; that's up
# to you.
score = evaluate_sequences(model, dev_X, dev_Y, 128)
print(epoch, f"{score:.3f}")
if out_dir:
model.to_disk(out_dir / f"{epoch}.bin")
@dataclass
class TokensPlus:
"""Dataclass to hold the output of the Huggingface 'batch_encode_plus' method."""
input_ids: torch.Tensor
token_type_ids: torch.Tensor
attention_mask: torch.Tensor
input_len: List[int]
overflowing_tokens: Optional[torch.Tensor] = None
num_truncated_tokens: Optional[torch.Tensor] = None
special_tokens_mask: Optional[torch.Tensor] = None
@thinc.registry.layers("TransformersTagger.v1")
def TransformersTagger(
starter: str, n_tags: int = 17
) -> Model[List[List[str]], List[Floats2d]]:
return chain(
TransformersTokenizer(starter),
Transformer(starter),
with_array(Softmax(nO=n_tags)),
)
@thinc.registry.layers("transformers_tokenizer.v1")
def TransformersTokenizer(name: str) -> Model[List[List[str]], TokensPlus]:
def forward(
model, texts: List[List[str]], is_train: bool
) -> Tuple[TokensPlus, Callable]:
tokenizer = model.attrs["tokenizer"]
token_data = tokenizer.batch_encode_plus(
[(text, None) for text in texts],
add_special_tokens=True,
return_token_type_ids=True,
return_attention_masks=True,
return_input_lengths=True,
return_tensors="pt",
)
return TokensPlus(**token_data), lambda d_tokens: []
return Model(
"tokenizer",
forward,
attrs={"tokenizer": AutoTokenizer.from_pretrained(name)},
)
@thinc.registry.layers("transformers_model.v1")
def Transformer(name: str) -> Model[TokensPlus, List[Floats2d]]:
return PyTorchWrapper(
AutoModel.from_pretrained(name),
convert_inputs=convert_transformer_inputs,
convert_outputs=convert_transformer_outputs,
)
def convert_transformer_inputs(model, tokens: TokensPlus, is_train):
kwargs = {
"input_ids": tokens.input_ids,
"attention_mask": tokens.attention_mask,
"token_type_ids": tokens.token_type_ids,
}
return ArgsKwargs(args=(), kwargs=kwargs), lambda dX: []
def convert_transformer_outputs(model, inputs_outputs, is_train):
layer_inputs, torch_outputs = inputs_outputs
torch_tokvecs: torch.Tensor = torch_outputs[0]
# Free the memory as soon as we can
torch_outputs = None
lengths = list(layer_inputs.input_len)
tokvecs: List[Floats2d] = model.ops.unpad(torch2xp(torch_tokvecs), lengths)
# Remove the BOS and EOS markers.
tokvecs = [arr[1:-1] for arr in tokvecs]
def backprop(d_tokvecs: List[Floats2d]) -> ArgsKwargs:
# Restore entries for bos and eos markers.
shim = model.shims[0]
row = model.ops.alloc2f(1, d_tokvecs[0].shape[1])
d_tokvecs = [model.ops.xp.vstack((row, arr, row)) for arr in d_tokvecs]
return ArgsKwargs(
args=(torch_tokvecs,),
kwargs={
"grad_tensors": xp2torch(model.ops.pad(d_tokvecs, device=shim.device))
},
)
return tokvecs, backprop
def evaluate_sequences(
model, Xs: List[Floats2d], Ys: List[Floats2d], batch_size: int
) -> float:
correct = 0.0
total = 0.0
for X, Y in model.ops.multibatch(batch_size, Xs, Ys):
Yh = model.predict(X)
for yh, y in zip(Yh, Y):
correct += (y.argmax(axis=1) == yh.argmax(axis=1)).sum()
total += y.shape[0]
return float(correct / total)
def minibatch_by_words(pairs, max_words):
"""Group pairs of sequences into minibatches under max_words in size,
considering padding. The size of a padded batch is the length of its
longest sequence multiplied by the number of elements in the batch.
"""
pairs = list(zip(*pairs))
pairs.sort(key=lambda xy: len(xy[0]), reverse=True)
batch = []
for X, Y in pairs:
batch.append((X, Y))
n_words = max(len(xy[0]) for xy in batch) * len(batch)
if n_words >= max_words:
# We went *over* the cap, so don't emit the batch with this
# example -- move that example into the next one.
yield batch[:-1]
batch = [(X, Y)]
if batch:
yield batch
if __name__ == "__main__":
typer.run(main)
thinc-release-v9.1.1/examples/type_checking.py 0000664 0000000 0000000 00000002656 14670643317 0021463 0 ustar 00root root 0000000 0000000 from typing import List
from thinc.layers import Relu, Softmax, chain, reduce_max, concatenate
from thinc.model import Model
# Define Custom X/Y types
MyModelX = List[List[float]]
MyModelY = List[List[float]]
model: Model[MyModelX, MyModelY] = chain(
Relu(12), Relu(12, dropout=0.2), Softmax(),
)
# ERROR: incompatible type "bool", expected "List[List[float]]"
model(False)
# ERROR: List item 0 has incompatible type "str"; expected "float"
model.begin_update([["0"]])
# ERROR: incompatible type "bool", expected "List[List[float]]"
model.predict(True)
# This example should be run with mypy. This is an example of type-level checking
# for network validity.
#
# We first define an invalid network.
# It's invalid because reduce_max expects Array3d as input, while Relu produces
# Array2d as output. chain has type-logic to verify input and output types
# line up.
#
# You should see an error like:
# examples/howto/type_chain.py:10: error: Cannot infer type argument 2 of "chain"
bad_model = chain(Relu(10), reduce_max(), Softmax())
concate_model = concatenate(Relu(10), reduce_max(), Relu(10), Relu(10))
concate_chain_model = chain(
concatenate(Relu(10), reduce_max(), Relu(10), Relu(10)), reduce_max()
)
# Now let's try it with a network that does work, just to be sure.
good_model = chain(Relu(10), Relu(10), Softmax())
# Finally we can reveal_type on the good model, to see what it thinks.
reveal_type(good_model)
thinc-release-v9.1.1/netlify.toml 0000664 0000000 0000000 00000000140 14670643317 0017010 0 ustar 00root root 0000000 0000000 [build]
base = "website"
publish = "public"
command = "npm run deploy"
ignore = "false"
thinc-release-v9.1.1/pyproject.toml 0000664 0000000 0000000 00000003117 14670643317 0017364 0 ustar 00root root 0000000 0000000 [build-system]
requires = [
"setuptools",
"cython>=0.25,<3.0",
"murmurhash>=1.0.2,<1.1.0",
"cymem>=2.0.2,<2.1.0",
"preshed>=3.0.2,<3.1.0",
"blis>=1.0.0,<1.1.0",
"numpy>=2.0.0,<3.0.0; python_version < '3.9'",
"numpy>=2.0.0,<3.0.0; python_version >= '3.9'",
]
build-backend = "setuptools.build_meta"
[tool.cibuildwheel]
build = "*"
skip = "pp* cp36* cp37* cp38*"
test-skip = ""
free-threaded-support = false
archs = ["native"]
build-frontend = "default"
config-settings = {}
dependency-versions = "pinned"
environment = {}
environment-pass = []
build-verbosity = 0
before-all = ""
before-build = ""
repair-wheel-command = ""
test-command = ""
before-test = ""
test-requires = []
test-extras = []
container-engine = "docker"
manylinux-x86_64-image = "manylinux2014"
manylinux-i686-image = "manylinux2014"
manylinux-aarch64-image = "manylinux2014"
manylinux-ppc64le-image = "manylinux2014"
manylinux-s390x-image = "manylinux2014"
manylinux-pypy_x86_64-image = "manylinux2014"
manylinux-pypy_i686-image = "manylinux2014"
manylinux-pypy_aarch64-image = "manylinux2014"
musllinux-x86_64-image = "musllinux_1_2"
musllinux-i686-image = "musllinux_1_2"
musllinux-aarch64-image = "musllinux_1_2"
musllinux-ppc64le-image = "musllinux_1_2"
musllinux-s390x-image = "musllinux_1_2"
[tool.cibuildwheel.linux]
repair-wheel-command = "auditwheel repair -w {dest_dir} {wheel}"
[tool.cibuildwheel.macos]
repair-wheel-command = "delocate-wheel --require-archs {delocate_archs} -w {dest_dir} -v {wheel}"
[tool.cibuildwheel.windows]
[tool.cibuildwheel.pyodide]
[tool.isort]
profile = "black"
thinc-release-v9.1.1/requirements.txt 0000664 0000000 0000000 00000001447 14670643317 0017740 0 ustar 00root root 0000000 0000000 # Explosion-provided dependencies
murmurhash>=1.0.2,<1.1.0
cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0
blis>=1.0.0,<1.1.0
srsly>=2.4.0,<3.0.0
wasabi>=0.8.1,<1.2.0
catalogue>=2.0.4,<2.1.0
confection>=0.0.1,<1.0.0
ml_datasets>=0.2.0,<0.3.0; python_version < "3.11"
# Third-party dependencies
pydantic>=1.7.4,!=1.8,!=1.8.1,<3.0.0
numpy>=2.0.0,<3.0.0
packaging>=20.0
# Development dependencies
cython>=0.25.0,<3.0
hypothesis>=3.27.0,<6.72.2
pytest>=8.2.0
pytest-cov>=2.7.0,<5.0.0
coverage>=5.0.0,<8.0.0
mock>=2.0.0,<3.0.0
flake8>=3.5.0,<3.6.0
mypy>=1.5.0,<1.6.0; platform_machine != "aarch64"
types-mock>=0.1.1
# Executing notebook tests
ipykernel>=5.1.4,<5.2.0
nbconvert>=5.6.1,<6.2.0
nbformat>=5.0.4,<5.2.0
# Test to_disk/from_disk against pathlib.Path subclasses
pathy>=0.3.5
black>=22.0,<23.0
isort>=5.0,<6.0
thinc-release-v9.1.1/setup.cfg 0000664 0000000 0000000 00000007225 14670643317 0016275 0 ustar 00root root 0000000 0000000 [metadata]
description = A refreshing functional take on deep learning, compatible with your favorite libraries
url = https://github.com/explosion/thinc
author = Explosion
author_email = contact@explosion.ai
license = MIT
long_description = file: README.md
long_description_content_type = text/markdown
classifiers =
Development Status :: 5 - Production/Stable
Environment :: Console
Intended Audience :: Developers
Intended Audience :: Science/Research
License :: OSI Approved :: MIT License
Operating System :: POSIX :: Linux
Operating System :: MacOS :: MacOS X
Operating System :: Microsoft :: Windows
Programming Language :: Cython
Programming Language :: Python :: 3
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3.11
Programming Language :: Python :: 3.12
Topic :: Scientific/Engineering
[options]
zip_safe = false
include_package_data = true
python_requires = >=3.9
setup_requires =
cython>=0.25,<3.0
numpy>=2.0.0,<3.0.0
# We also need our Cython packages here to compile against
cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0
murmurhash>=1.0.2,<1.1.0
blis>=1.0.0,<1.1.0
install_requires =
# Explosion-provided dependencies
blis>=1.0.0,<1.1.0
murmurhash>=1.0.2,<1.1.0
cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0
wasabi>=0.8.1,<1.2.0
srsly>=2.4.0,<3.0.0
catalogue>=2.0.4,<2.1.0
confection>=0.0.1,<1.0.0
# Third-party dependencies
setuptools
numpy>=2.0.0,<3.0.0
pydantic>=1.7.4,!=1.8,!=1.8.1,<3.0.0
packaging>=20.0
[options.entry_points]
pytest_randomly.random_seeder =
thinc = thinc.api:fix_random_seed
[options.extras_require]
cuda =
cupy>=5.0.0b4
cuda80 =
cupy-cuda80>=5.0.0b4
cuda90 =
cupy-cuda90>=5.0.0b4
cuda91 =
cupy-cuda91>=5.0.0b4
cuda92 =
cupy-cuda92>=5.0.0b4
cuda100 =
cupy-cuda100>=5.0.0b4
cuda101 =
cupy-cuda101>=5.0.0b4
cuda102 =
cupy-cuda102>=5.0.0b4
cuda110 =
cupy-cuda110>=5.0.0b4
cuda111 =
cupy-cuda111>=5.0.0b4
cuda112 =
cupy-cuda112>=5.0.0b4
cuda113 =
cupy-cuda113>=5.0.0b4
cuda114 =
cupy-cuda114>=5.0.0b4
cuda115 =
cupy-cuda115>=5.0.0b4
cuda116 =
cupy-cuda116>=5.0.0b4
cuda117 =
cupy-cuda117>=5.0.0b4
cuda11x =
cupy-cuda11x>=11.0.0
cuda12x =
cupy-cuda12x>=11.5.0
cuda-autodetect =
cupy-wheel>=11.0.0
datasets =
ml_datasets>=0.2.0,<0.3.0
torch =
torch>=1.6.0
tensorflow =
tensorflow>=2.0.0,<2.6.0
mxnet =
mxnet>=1.5.1,<1.6.0
[bdist_wheel]
universal = false
[sdist]
formats = gztar
[flake8]
ignore = E203, E266, E501, E731, W503, E741
max-line-length = 80
select = B,C,E,F,W,T4,B9
exclude =
thinc/__init__.py
thinc/layers/__init__.py
thinc/shims/__init__.py
thinc/api.py
thinc/types.py
thinc/tests/mypy
[tool:pytest]
markers =
slow
[coverage:run]
plugins = Cython.Coverage
omit =
**/tests/*
thinc/types.py
thinc/backends/cupy_ops.py
thinc/backends/_custom_kernels.py
thinc/backends/_cupy_allocators.py
thinc/layers/staticvectors.py
[coverage:report]
show_missing = True
exclude_lines =
pragma: no cover
# Don't complain about missing debug-only code:
def __unicode__
def __repr__
if self\.debug
# Don't complain if tests don't hit defensive assertion code:
raise AssertionError
raise NotImplementedError
# Don't complain if non-runnable code isn't run:
if 0:
if __name__ == .__main__.:
assert False
[mypy]
ignore_missing_imports = True
no_implicit_optional = True
plugins = pydantic.mypy, thinc.mypy
[mypy-thinc.tests.*]
implicit_reexport = True
[mypy-thinc.tests.mypy.*]
ignore_errors = True
thinc-release-v9.1.1/setup.py 0000664 0000000 0000000 00000006610 14670643317 0016163 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
import platform
import sys
from setuptools.command.build_ext import build_ext
from sysconfig import get_path
from setuptools import Extension, setup, find_packages
from pathlib import Path
import numpy
from Cython.Build import cythonize
from Cython.Compiler import Options
# Preserve `__doc__` on functions and classes
# http://docs.cython.org/en/latest/src/userguide/source_files_and_compilation.html#compiler-options
Options.docstrings = True
ACCELERATE = "thinc.backends._accelerate"
APPLE_OPS = ["thinc.backends.apple_ops", ACCELERATE]
PACKAGES = find_packages()
MOD_NAMES = [
"thinc.backends.cblas",
"thinc.backends.numpy_ops",
"thinc.layers.sparselinear",
"thinc.layers.premap_ids",
] + (APPLE_OPS if platform.system() == "Darwin" else [])
COMPILE_OPTIONS = {
"msvc": ["/Ox", "/EHsc"],
"other": ["-O3", "-Wno-strict-prototypes", "-Wno-unused-function", "-std=c++11"],
}
COMPILER_DIRECTIVES = {
"language_level": 3,
"embedsignature": True,
"annotation_typing": False,
"profile": sys.version_info < (3, 12),
}
LINK_OPTIONS = {"msvc": [], "other": []}
# By subclassing build_extensions we have the actual compiler that will be used
# which is really known only after finalize_options
# http://stackoverflow.com/questions/724664/python-distutils-how-to-get-a-compiler-that-is-going-to-be-used
class build_ext_options:
def build_options(self):
if hasattr(self.compiler, "initialize"):
self.compiler.initialize()
self.compiler.platform = sys.platform[:6]
for e in self.extensions:
e.extra_compile_args = COMPILE_OPTIONS.get(
self.compiler.compiler_type, COMPILE_OPTIONS["other"]
)
e.extra_link_args = LINK_OPTIONS.get(
self.compiler.compiler_type, LINK_OPTIONS["other"]
)
class build_ext_subclass(build_ext, build_ext_options):
def build_extensions(self):
build_ext_options.build_options(self)
build_ext.build_extensions(self)
def clean(path):
for path in path.glob("**/*"):
if path.is_file() and path.suffix in (".so", ".cpp"):
print(f"Deleting {path.name}")
path.unlink()
def setup_package():
root = Path(__file__).parent
if len(sys.argv) > 1 and sys.argv[1] == "clean":
return clean(root / "thinc")
with (root / "thinc" / "about.py").open("r") as f:
about = {}
exec(f.read(), about)
include_dirs = [numpy.get_include(), get_path("include")]
ext_modules = []
for name in MOD_NAMES:
mod_path = name.replace(".", "/") + ".pyx"
if name == ACCELERATE:
ext = Extension(
name,
[mod_path],
language="c++",
include_dirs=include_dirs,
libraries=["blas"],
)
else:
ext = Extension(name, [mod_path], language="c++", include_dirs=include_dirs)
ext_modules.append(ext)
print("Cythonizing sources")
ext_modules = cythonize(
ext_modules, compiler_directives=COMPILER_DIRECTIVES, language_level=2
)
setup(
name="thinc",
packages=PACKAGES,
version=about["__version__"],
ext_modules=ext_modules,
cmdclass={"build_ext": build_ext_subclass},
package_data={"": ["*.pyx", "*.pxd", "*.pxi", "*.cu", "*.hh"]},
)
if __name__ == "__main__":
setup_package()
thinc-release-v9.1.1/thinc/ 0000775 0000000 0000000 00000000000 14670643317 0015553 5 ustar 00root root 0000000 0000000 thinc-release-v9.1.1/thinc/__init__.pxd 0000664 0000000 0000000 00000000000 14670643317 0020015 0 ustar 00root root 0000000 0000000 thinc-release-v9.1.1/thinc/__init__.py 0000664 0000000 0000000 00000000325 14670643317 0017664 0 ustar 00root root 0000000 0000000 # Necessary for some side-effects in Cython. Not sure I understand.
import numpy
from .about import __version__
from .config import registry
# fmt: off
__all__ = [
"registry",
"__version__",
]
# fmt: on
thinc-release-v9.1.1/thinc/about.py 0000664 0000000 0000000 00000000051 14670643317 0017233 0 ustar 00root root 0000000 0000000 __version__ = "9.1.1"
__release__ = True
thinc-release-v9.1.1/thinc/api.py 0000664 0000000 0000000 00000013570 14670643317 0016704 0 ustar 00root root 0000000 0000000 from .backends import (
CupyOps,
MPSOps,
NumpyOps,
Ops,
get_current_ops,
get_ops,
set_current_ops,
set_gpu_allocator,
use_ops,
use_pytorch_for_gpu_memory,
use_tensorflow_for_gpu_memory,
)
from .compat import enable_mxnet, enable_tensorflow, has_cupy
from .config import Config, ConfigValidationError, registry
from .initializers import (
configure_normal_init,
glorot_uniform_init,
normal_init,
uniform_init,
zero_init,
)
from .layers import (
LSTM,
CauchySimilarity,
ClippedLinear,
Dish,
Dropout,
Embed,
Gelu,
HardSigmoid,
HardSwish,
HardSwishMobilenet,
HardTanh,
HashEmbed,
LayerNorm,
Linear,
Logistic,
Maxout,
Mish,
MultiSoftmax,
MXNetWrapper,
ParametricAttention,
ParametricAttention_v2,
PyTorchLSTM,
PyTorchRNNWrapper,
PyTorchWrapper,
PyTorchWrapper_v2,
PyTorchWrapper_v3,
Relu,
ReluK,
Sigmoid,
Softmax,
Softmax_v2,
SparseLinear,
SparseLinear_v2,
Swish,
TensorFlowWrapper,
TorchScriptWrapper_v1,
add,
array_getitem,
bidirectional,
chain,
clone,
concatenate,
expand_window,
keras_subclass,
list2array,
list2padded,
list2ragged,
map_list,
noop,
padded2list,
premap_ids,
pytorch_to_torchscript_wrapper,
ragged2list,
reduce_first,
reduce_last,
reduce_max,
reduce_mean,
reduce_sum,
remap_ids,
remap_ids_v2,
residual,
resizable,
siamese,
sigmoid_activation,
softmax_activation,
strings2arrays,
tuplify,
uniqued,
with_array,
with_array2d,
with_cpu,
with_debug,
with_flatten,
with_flatten_v2,
with_getitem,
with_list,
with_nvtx_range,
with_padded,
with_ragged,
with_reshape,
with_signpost_interval,
)
from .loss import (
CategoricalCrossentropy,
CosineDistance,
L2Distance,
SequenceCategoricalCrossentropy,
)
from .model import (
Model,
change_attr_values,
deserialize_attr,
serialize_attr,
set_dropout_rate,
wrap_model_recursive,
)
from .optimizers import SGD, Adam, Optimizer, RAdam
from .schedules import (
Schedule,
compounding,
constant,
constant_then,
cyclic_triangular,
decaying,
plateau,
slanted_triangular,
warmup_linear,
)
from .shims import (
MXNetShim,
PyTorchGradScaler,
PyTorchShim,
Shim,
TensorFlowShim,
TorchScriptShim,
keras_model_fns,
maybe_handshake_model,
)
from .types import ArgsKwargs, Padded, Ragged, Unserializable
from .util import (
DataValidationError,
data_validation,
fix_random_seed,
get_array_module,
get_torch_default_device,
get_width,
is_cupy_array,
mxnet2xp,
prefer_gpu,
require_cpu,
require_gpu,
set_active_gpu,
tensorflow2xp,
to_categorical,
to_numpy,
torch2xp,
xp2mxnet,
xp2tensorflow,
xp2torch,
)
try:
from .backends import AppleOps
except ImportError:
AppleOps = None
# fmt: off
__all__ = [
# .config
"Config", "registry", "ConfigValidationError",
# .initializers
"normal_init", "uniform_init", "glorot_uniform_init", "zero_init",
"configure_normal_init",
# .loss
"CategoricalCrossentropy", "L2Distance", "CosineDistance",
"SequenceCategoricalCrossentropy",
# .model
"Model", "serialize_attr", "deserialize_attr",
"set_dropout_rate", "change_attr_values", "wrap_model_recursive",
# .shims
"Shim", "PyTorchGradScaler", "PyTorchShim", "TensorFlowShim", "keras_model_fns",
"MXNetShim", "TorchScriptShim", "maybe_handshake_model",
# .optimizers
"Adam", "RAdam", "SGD", "Optimizer",
# .schedules
"Schedule", "cyclic_triangular", "warmup_linear", "constant", "constant_then",
"decaying", "slanted_triangular", "compounding", "plateau",
# .types
"Ragged", "Padded", "ArgsKwargs", "Unserializable",
# .util
"fix_random_seed", "is_cupy_array", "set_active_gpu",
"prefer_gpu", "require_gpu", "require_cpu",
"DataValidationError", "data_validation",
"to_categorical", "get_width", "get_array_module", "to_numpy",
"torch2xp", "xp2torch", "tensorflow2xp", "xp2tensorflow", "mxnet2xp", "xp2mxnet",
"get_torch_default_device",
# .compat
"enable_mxnet",
"enable_tensorflow",
"has_cupy",
# .backends
"get_ops", "set_current_ops", "get_current_ops", "use_ops",
"Ops", "AppleOps", "CupyOps", "MPSOps", "NumpyOps", "set_gpu_allocator",
"use_pytorch_for_gpu_memory", "use_tensorflow_for_gpu_memory",
# .layers
"Dropout", "Embed", "expand_window", "HashEmbed", "LayerNorm", "Linear",
"Maxout", "Mish", "MultiSoftmax", "Relu", "softmax_activation", "Softmax", "LSTM",
"CauchySimilarity", "ParametricAttention", "Logistic",
"resizable", "sigmoid_activation", "Sigmoid", "SparseLinear",
"ClippedLinear", "ReluK", "HardTanh", "HardSigmoid",
"Dish", "HardSwish", "HardSwishMobilenet", "Swish", "Gelu",
"PyTorchWrapper", "PyTorchRNNWrapper", "PyTorchLSTM",
"TensorFlowWrapper", "keras_subclass", "MXNetWrapper",
"PyTorchWrapper_v2", "Softmax_v2", "PyTorchWrapper_v3",
"SparseLinear_v2", "TorchScriptWrapper_v1", "ParametricAttention_v2",
"add", "bidirectional", "chain", "clone", "concatenate", "noop",
"residual", "uniqued", "siamese", "list2ragged", "ragged2list",
"map_list",
"with_array", "with_array2d",
"with_padded", "with_list", "with_ragged", "with_flatten",
"with_reshape", "with_getitem", "strings2arrays", "list2array",
"list2ragged", "ragged2list", "list2padded", "padded2list",
"remap_ids", "remap_ids_v2", "premap_ids",
"array_getitem", "with_cpu", "with_debug", "with_nvtx_range",
"with_signpost_interval",
"tuplify", "with_flatten_v2",
"pytorch_to_torchscript_wrapper",
"reduce_first", "reduce_last", "reduce_max", "reduce_mean", "reduce_sum",
]
# fmt: on
thinc-release-v9.1.1/thinc/backends/ 0000775 0000000 0000000 00000000000 14670643317 0017325 5 ustar 00root root 0000000 0000000 thinc-release-v9.1.1/thinc/backends/__init__.pxd 0000664 0000000 0000000 00000000000 14670643317 0021567 0 ustar 00root root 0000000 0000000 thinc-release-v9.1.1/thinc/backends/__init__.py 0000664 0000000 0000000 00000012317 14670643317 0021442 0 ustar 00root root 0000000 0000000 import contextlib
import threading
from contextvars import ContextVar
from typing import Any, Callable, Dict, Optional, Type, cast
from .. import registry
from ..compat import cupy, has_cupy
from ..util import (
assert_pytorch_installed,
assert_tensorflow_installed,
get_torch_default_device,
is_cupy_array,
require_cpu,
)
from ._cupy_allocators import cupy_pytorch_allocator, cupy_tensorflow_allocator
from ._param_server import ParamServer
from .cupy_ops import CupyOps
from .mps_ops import MPSOps
from .numpy_ops import NumpyOps
from .ops import Ops
try:
from .apple_ops import AppleOps
except ImportError:
AppleOps = None
context_ops: ContextVar[Optional[Ops]] = ContextVar("context_ops", default=None)
context_pools: ContextVar[dict] = ContextVar("context_pools", default={})
# Internal use of thread-local storage only for detecting cases where a Jupyter
# notebook might not have preserved contextvars across cells.
_GLOBAL_STATE = {"ops": None}
# Thread-local state.
_LOCAL_STATE = threading.local()
def set_gpu_allocator(allocator: str) -> None: # pragma: no cover
"""Route GPU memory allocation via PyTorch or tensorflow.
Raise an error if the given argument does not match either of the two.
"""
if allocator == "pytorch":
use_pytorch_for_gpu_memory()
elif allocator == "tensorflow":
use_tensorflow_for_gpu_memory()
else:
raise ValueError(
f"Invalid 'gpu_allocator' argument: '{allocator}'. Available allocators are: 'pytorch', 'tensorflow'"
)
def use_pytorch_for_gpu_memory() -> None: # pragma: no cover
"""Route GPU memory allocation via PyTorch.
This is recommended for using PyTorch and cupy together, as otherwise
OOM errors can occur when there's available memory sitting in the other
library's pool.
We'd like to support routing Tensorflow memory allocation via PyTorch as well
(or vice versa), but do not currently have an implementation for it.
"""
assert_pytorch_installed()
if get_torch_default_device().type != "cuda":
return
pools = context_pools.get()
if "pytorch" not in pools:
pools["pytorch"] = cupy.cuda.MemoryPool(allocator=cupy_pytorch_allocator)
cupy.cuda.set_allocator(pools["pytorch"].malloc)
def use_tensorflow_for_gpu_memory() -> None: # pragma: no cover
"""Route GPU memory allocation via TensorFlow.
This is recommended for using TensorFlow and cupy together, as otherwise
OOM errors can occur when there's available memory sitting in the other
library's pool.
We'd like to support routing PyTorch memory allocation via Tensorflow as
well (or vice versa), but do not currently have an implementation for it.
"""
assert_tensorflow_installed()
pools = context_pools.get()
if "tensorflow" not in pools:
pools["tensorflow"] = cupy.cuda.MemoryPool(allocator=cupy_tensorflow_allocator)
cupy.cuda.set_allocator(pools["tensorflow"].malloc)
def _import_extra_cpu_backends():
try:
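        # Imported only for its side effect: importing the package registers
        # the BigEndianOps backend in thinc's ops registry, so get_ops("cpu")
        # below can find it.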
from thinc_bigendian_ops import BigEndianOps
except ImportError:
pass
def get_ops(name: str, **kwargs) -> Ops:
"""Get a backend object.
The special name "cpu" returns the best available CPU backend."""
ops_by_name = {ops_cls.name: ops_cls for ops_cls in registry.ops.get_all().values()} # type: ignore
cls: Optional[Callable[..., Ops]] = None
if name == "cpu":
_import_extra_cpu_backends()
cls = ops_by_name.get("numpy")
cls = ops_by_name.get("apple", cls)
cls = ops_by_name.get("bigendian", cls)
else:
cls = ops_by_name.get(name)
if cls is None:
raise ValueError(f"Invalid backend: {name}")
return cls(**kwargs)
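# Example (a minimal sketch): get_ops("numpy") returns a NumpyOps instance,
# while get_ops("cpu") picks the best available CPU backend -- NumpyOps by
# default, preferring AppleOps or BigEndianOps when those can be imported.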
def get_array_ops(arr):
"""Return CupyOps for a cupy array, NumpyOps otherwise."""
if is_cupy_array(arr):
return CupyOps()
else:
return NumpyOps()
@contextlib.contextmanager
def use_ops(name: str, **kwargs):
"""Change the backend to execute on for the scope of the block."""
current_ops = get_current_ops()
set_current_ops(get_ops(name, **kwargs))
try:
yield
finally:
set_current_ops(current_ops)
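# Example usage of use_ops (a minimal sketch): code inside the block runs with
# the requested backend as the current ops, and the previous backend is
# restored afterwards, even if the block raises.
#
#     with use_ops("numpy"):
#         ops = get_current_ops()  # an instance of NumpyOps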
def get_current_ops() -> Ops:
"""Get the current backend object."""
if context_ops.get() is None:
require_cpu()
return cast(Ops, context_ops.get())
def set_current_ops(ops: Ops) -> None:
"""Change the current backend object."""
context_ops.set(ops)
_get_thread_state().ops = ops
def contextvars_eq_thread_ops() -> bool:
current_ops = context_ops.get()
thread_ops = _get_thread_state().ops
    return type(current_ops) == type(thread_ops)
def _get_thread_state() -> threading.local:
"""Get a thread-specific state variable that inherits from a global
state when it's created."""
if not hasattr(_LOCAL_STATE, "initialized") or not _LOCAL_STATE.initialized:
for name, value in _GLOBAL_STATE.items():
setattr(_LOCAL_STATE, name, value)
_LOCAL_STATE.initialized = True
return _LOCAL_STATE
__all__ = [
"set_current_ops",
"get_current_ops",
"use_ops",
"ParamServer",
"Ops",
"AppleOps",
"CupyOps",
"MPSOps",
"NumpyOps",
"has_cupy",
]
thinc-release-v9.1.1/thinc/backends/_accelerate.pxd 0000664 0000000 0000000 00000003670 14670643317 0022277 0 ustar 00root root 0000000 0000000 cdef extern from "Accelerate/Accelerate.h":
enum CBLAS_ORDER: CblasRowMajor, CblasColMajor
enum CBLAS_TRANSPOSE: CblasNoTrans, CblasTrans, CblasConjTrans
enum CBLAS_UPLO: CblasUpper, CblasLower
enum CBLAS_DIAG: CblasNonUnit, CblasUnit
enum CBLAS_SIDE: CblasLeft, CblasRight
# BLAS level 1 routines
void cblas_sswap(int M, float *x, int incX, float *y, int incY) nogil
void cblas_sscal(int N, float alpha, float *x, int incX) nogil
void cblas_scopy(int N, float *x, int incX, float *y, int incY) nogil
void cblas_saxpy(int N, float alpha, float *x, int incX, float *y, int incY ) nogil
float cblas_sdot(int N, float *x, int incX, float *y, int incY ) nogil
float cblas_snrm2(int N, float *x, int incX) nogil
float cblas_sasum(int N, float *x, int incX) nogil
int cblas_isamax(int N, float *x, int incX) nogil
# BLAS level 2 routines
void cblas_sgemv(CBLAS_ORDER Order, CBLAS_TRANSPOSE TransA, int M, int N,
float alpha, float *A, int lda, float *x, int incX,
float beta, float *y, int incY) nogil
void cblas_sger(CBLAS_ORDER Order, int M, int N, float alpha, float *x,
int incX, float *y, int incY, float *A, int lda) nogil
# BLAS level 3 routines
void cblas_sgemm(CBLAS_ORDER Order, CBLAS_TRANSPOSE TransA,
CBLAS_TRANSPOSE TransB, int M, int N, int K,
float alpha, float *A, int lda, float *B, int ldb,
float beta, float *C, int ldc) nogil
cdef void sgemm(bint TransA, bint TransB, int M, int N, int K,
float alpha, const float* A, int lda, const float *B,
int ldb, float beta, float* C, int ldc) nogil
cdef void saxpy(int N, float alpha, const float* X, int incX,
float *Y, int incY) nogil
thinc-release-v9.1.1/thinc/backends/_accelerate.pyx 0000664 0000000 0000000 00000003643 14670643317 0022324 0 ustar 00root root 0000000 0000000 cimport numpy as np
from libc.stdint cimport uintptr_t
import numpy
cpdef np.ndarray gemm(float[:, ::1] A, float[:, ::1] B,
bint trans1=False, bint trans2=False,
np.ndarray out=None):
cdef int nM = A.shape[0] if not trans1 else A.shape[1]
cdef int nK = A.shape[1] if not trans1 else A.shape[0]
cdef int nK_b = B.shape[0] if not trans2 else B.shape[1]
cdef int nN = B.shape[1] if not trans2 else B.shape[0]
cdef float[:, ::1] C = out
if out is None:
out = numpy.empty((nM, nN), dtype="f")
C = out
else:
if C.shape[0] != nM or C.shape[1] != nN:
msg = "Shape mismatch for output matrix, was: (%d, %d), expected (%d, %d)"
raise ValueError(msg % (C.shape[0], C.shape[1], nM, nN))
if nK != nK_b:
msg = "Shape mismatch for gemm: (%d, %d), (%d, %d)"
raise ValueError(msg % (nM, nK, nK_b, nN))
if nM == 0 or nK == 0 or nN == 0:
return out
cblas_sgemm(
CblasRowMajor,
CblasTrans if trans1 else CblasNoTrans,
CblasTrans if trans2 else CblasNoTrans,
nM,
nN,
nK,
1.0,
&A[0, 0],
A.shape[1],
&B[0, 0],
B.shape[1],
0.0,
&C[0, 0],
C.shape[1]
)
return out
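# Example call from Python (this extension is only built on macOS against
# Accelerate; shapes follow the (M, K) x (K, N) -> (M, N) convention):
#
#     import numpy
#     A = numpy.ones((4, 3), dtype="f")
#     B = numpy.ones((3, 5), dtype="f")
#     C = gemm(A, B)           # allocates a new (4, 5) output
#     gemm(A, B, out=C)        # or reuses a preallocated output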
cdef void sgemm(bint TransA, bint TransB, int M, int N, int K,
float alpha, const float* A, int lda, const float *B,
int ldb, float beta, float* C, int ldc) nogil:
cblas_sgemm(
CblasRowMajor,
CblasTrans if TransA else CblasNoTrans,
CblasTrans if TransB else CblasNoTrans,
M,
N,
K,
alpha,
A,
lda,
B,
ldb,
beta,
C,
ldc
)
cdef void saxpy(int N, float alpha, const float* X, int incX,
float *Y, int incY) nogil:
cblas_saxpy(N, alpha, X, incX, Y, incY)
thinc-release-v9.1.1/thinc/backends/_cupy_allocators.py 0000664 0000000 0000000 00000004250 14670643317 0023242 0 ustar 00root root 0000000 0000000 from typing import cast
from ..compat import cupy, tensorflow, torch
from ..types import ArrayXd
from ..util import get_torch_default_device, tensorflow2xp
def cupy_tensorflow_allocator(size_in_bytes: int):
"""Function that can be passed into cupy.cuda.set_allocator, to have cupy
allocate memory via TensorFlow. This is important when using the two libraries
together, as otherwise OOM errors can occur when there's available memory
sitting in the other library's pool.
"""
size_in_bytes = max(1024, size_in_bytes)
tensor = tensorflow.zeros((size_in_bytes // 4,), dtype=tensorflow.dtypes.float32) # type: ignore
# We convert to cupy via dlpack, so that we can get a memory pointer.
cupy_array = cast(ArrayXd, tensorflow2xp(tensor))
address = int(cupy_array.data)
    # cupy's UnownedMemory class lets us wrap the pointer without taking
    # ownership, so cupy won't try to free it.
memory = cupy.cuda.memory.UnownedMemory(address, size_in_bytes, cupy_array)
# Now return a new memory pointer.
return cupy.cuda.memory.MemoryPointer(memory, 0)
def cupy_pytorch_allocator(size_in_bytes: int):
    """Function that can be passed into cupy.cuda.set_allocator, to have cupy
    allocate memory via PyTorch. This is important when using the two libraries
    together, as otherwise OOM errors can occur when there's available memory
    sitting in the other library's pool.
    """
    device = get_torch_default_device()
    # CuPy appeared to have trouble with very small allocations, so allocate
    # at least 1024 bytes.
    size_in_bytes = max(1024, size_in_bytes)
    # Allocating a zeroed tensor turned out to be faster than going through
    # PyTorch's lower-level FloatStorage type, possibly because more of the
    # work happens in C++.
torch_tensor = torch.zeros(
(size_in_bytes // 4,), requires_grad=False, device=device
)
    # cupy's UnownedMemory class lets us wrap the pointer without taking
    # ownership, so cupy won't try to free it.
    # data_ptr() appears to be a private API; it's missing from the type stubs.
    address = torch_tensor.data_ptr()  # type: ignore
memory = cupy.cuda.memory.UnownedMemory(address, size_in_bytes, torch_tensor)
# Now return a new memory pointer.
return cupy.cuda.memory.MemoryPointer(memory, 0)
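# Rough illustration of the intended effect (assumes a CUDA device; see
# use_pytorch_for_gpu_memory() in thinc.backends for the supported setup):
#
#     pool = cupy.cuda.MemoryPool(allocator=cupy_pytorch_allocator)
#     cupy.cuda.set_allocator(pool.malloc)
#     before = torch.cuda.memory_reserved()
#     x = cupy.zeros((1024, 1024), dtype="float32")
#     assert torch.cuda.memory_reserved() >= before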
thinc-release-v9.1.1/thinc/backends/_custom_kernels.cu 0000664 0000000 0000000 00000044606 14670643317 0023064 0 ustar 00root root 0000000 0000000 // Use grid strided loops, described here:
// https://devblogs.nvidia.com/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/
// This pattern ensures that all of the loop values are visited once, no matter
// what grid parameters are used for the function.
// We cannot include CUDA header for mathematical constants, since it requires
// that the development headers of the CUDA toolkit are installed.
template <typename T>
struct Constants {};
template <>
struct Constants<double> {
    static constexpr double INV_SQRT_2 = 0.7071067811865475;
    static constexpr double INV_SQRT_2PI = 0.3989422804014327;
};
template <>
struct Constants<float> {
    static constexpr float INV_SQRT_2 = 0.70710677;
    static constexpr float INV_SQRT_2PI = 0.3989423;
};
template <typename U>
__global__ void gather_add(U* out_bo, const U* table_to, const int* indices_bk,
int T, int O, int B, int K)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int b = _loop_start; b < B; b += _loop_stride) {
for (int k = 0; k < K; ++k) {
int idx = indices_bk[b * K + k];
const U* table = table_to + idx * O;
U* out = out_bo + b * O;
for (int o = 0; o < O; ++o) {
out[o] += table[o];
}
}
}
}
template <typename T>
__global__ void seq2col(T* output, const T* X, const int* lengths,
int nW, int B, int I, int nL)
{
// Let's say nW is 1 (it usually is). Then we want to take:
// 1a 1b 1c
// 2a 2b 2c
// 3a 3b 3c
// And make
// __ __ __ 1a 1b 1c 2a 2b 2c
// 1a 1b 1c 2a 2b 2c 3a 3b 3c
// 2a 2b 2c 3a 3b 3c __ __ __
// Where __ is padding.
// Now let's say nW is 2. Then we want to take:
// 1a 1b 1c
// 2a 2b 2c
// 3a 3b 3c
// And make
// __ __ __ __ __ __ 1a 1b 1c 2a 2b 2c 3a 3b 3c
// __ __ __ 1a 1b 1c 2a 2b 2c 3a 3b 3c __ __ __
// 1a 1b 1c 2a 2b 2c 3a 3b 3c __ __ __ __ __ __
// * x_start=-6, x_end=9 : (0-2) * 3, (0+2+1) * 3
// * x_start=-3, x_end=13 : (1-2) * 3, (1+2+1) * 3
// * x_start=0, x_end=16 : (2-2) * 3, (2+2+1) * 3
//
// If lengths > 1, then the sequence lengths dictate
// the boundaries/padding rather than the begin/end
// of X.
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
int nF = nW * 2 + 1;
int seq = 0;
int seq_start = 0;
for (int b = _loop_start; b < B; b += _loop_stride)
{
        // Find the sequence in which b lies.
for (; seq < nL; ++seq) {
if (b < seq_start + lengths[seq]) {
break;
}
seq_start += lengths[seq];
}
// Calculate the bounds of the sequence wherein b lies.
int seq_end = seq_start + lengths[seq];
// Find the unconstrained window around b, which
// may be out of the sequence bounds.
int window_start = b - nW;
int window_end = b + nW + 1;
// Find the sequence-constrained window around b.
int x_start = max(seq_start, window_start);
int x_end = min(seq_end, window_end);
int n_elems = x_end - x_start;
// If the left window is cut short, we want to start by
// the same amount in the output.
int out_offset = x_start - window_start;
for (int i = 0; i < n_elems * I; i++) {
output[(b * I * nF) + (out_offset * I) + i] =
X[(x_start * I) + i];
}
}
}
template <typename T>
__global__ void pad(T* out, T const **seqs, int const *lengths, int stride, int N, int L)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int i = _loop_start; i < L * stride; i += _loop_stride) {
for (int j = 0; j < N; ++j) {
T const *seq = seqs[j];
if (i < lengths[j] * stride) {
out[j * L * stride + i] = seq[i];
} else {
out[j * L * stride + i] = T();
}
}
}
}
template <typename T>
__global__ void maxout(T* best, int* which, const T* cands, int B, int O, int P)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int bo = _loop_start; bo < B * O; bo += _loop_stride)
{
// Go to the candidates at the output we're working on
const T* cands_bo = &cands[bo * P];
int best_idx = 0;
T best_val = cands_bo[0];
for (int p = 1; p < P; ++p)
{
if (cands_bo[p] > best_val) {
best_idx = p;
best_val = cands_bo[p];
}
}
which[bo] = best_idx;
best[bo] = best_val;
}
}
template <typename T>
__global__ void clipped_linear(T* Y, const T* X, double slope, double offset, double min_val, double max_val, int N)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int i = _loop_start; i < N; i += _loop_stride)
{
T y = X[i] * slope + offset;
Y[i] = min(max(y, min_val), max_val);
}
}
template <typename T>
__global__ void dish(T* Y, const T* X, int N)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int i = _loop_start; i < N; i += _loop_stride)
{
T x = X[i];
Y[i] = 0.5 * x * (x / sqrt(1 + x * x) + 1);
}
}
template <typename T>
__global__ void gelu(T* Y, const T* X, double threshold, int N)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int i = _loop_start; i < N; i += _loop_stride)
{
T x = X[i];
if (x >= threshold) {
Y[i] = x;
} else if (x <= -threshold) {
Y[i] = 0.0;
} else {
            T cdf = 0.5 * (1.0 + erf(Constants<T>::INV_SQRT_2 * x));
Y[i] = x * cdf;
}
}
}
template <typename T>
__global__ void mish(T* Y, const T* X, double threshold, int N)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
T one = 1.;
for (int i = _loop_start; i < N; i += _loop_stride)
{
if (X[i] >= threshold)
Y[i] = X[i];
else
Y[i] = X[i] * tanh(log(one + exp(X[i])));
}
}
template <typename T>
__global__ void swish(T* Y, const T* X, double threshold, int N)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int i = _loop_start; i < N; i += _loop_stride)
{
if (X[i] >= threshold) {
Y[i] = X[i];
} else if (X[i] <= -threshold) {
Y[i] = 0.0;
} else {
T logistic_cdf = 1.0 / (1.0 + exp(-X[i]));
Y[i] = X[i] * logistic_cdf;
}
}
}
template <typename U>
__global__ void reduce_sum(U* output, const U* X,
const int* lengths, int B, int T, int O)
{
// Compute sums of a batch of concatenated sequences
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int b = _loop_start; b < B; b += _loop_stride)
{
// Go to the regions we're working on
U* output_b = &output[b*O];
// Find the sequence item we're working on
int t = 0;
for (int i=0; i < b; ++i) {
t += lengths[i];
}
int length = lengths[b];
// Each invocation of the kernel sums one batch.
for (int i=0; i < length; ++i) // Iterate over rows
{
const U* X_t = &X[(t+i)*O];
for (int j=0; j < O; ++j)
{
output_b[j] += X_t[j];
}
}
}
}
template <typename U>
__global__ void reduce_max(U* maxes, int* which,
const U* X, const int* lengths, int B, int T, int O)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int b = _loop_start; b < B; b += _loop_stride)
{
// Go to the regions we're working on
U* maxes_b = &maxes[b*O];
int* which_b = &which[b*O];
// Find the sequence item we're working on
const U* X_t = X;
for (int i=0; i < b; ++i) {
X_t += lengths[i] * O;
}
// Each invocation of the kernel maxes one sequence.
// Start by assuming maxes are the first element.
for (int i=0; i < O; ++i) {
maxes_b[i] = X_t[i];
which_b[i] = 0;
}
int length = lengths[b];
for (int i=1; i < length; ++i) // Iterate over rows
{
X_t += O;
for (int j=0; j < O; ++j)
{
if (X_t[j] > maxes_b[j])
{
maxes_b[j] = X_t[j];
which_b[j] = i;
}
}
}
}
}
template <typename T>
__global__ void backprop_seq2col(T* d_seqs, const T* d_cols, const int* lengths,
int nW, int B, int I, int nL)
{
// Here's what we're doing, if we had 2d indexing.
//for i in range(B):
// d_seq[i] += d_cols[i-2, 4]
// d_seq[i] += d_cols[i-1, 3]
// d_seq[i] += d_cols[i, 2]
// d_seq[i] += d_cols[i+1, 1]
// d_seq[i] += d_cols[i+2, 0]
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
int nF = nW * 2 + 1;
int seq = 0;
int seq_start = 0;
for (int b = _loop_start; b < B; b += _loop_stride)
{
        // Find the sequence (and its start offset) in which b lies.
        // FIXME: do not restart the offset search for every b.
for (; seq < nL; ++seq) {
if (b < seq_start + lengths[seq]) {
break;
}
seq_start += lengths[seq];
}
// Calculate the bounds of the sequence wherein b lies.
int seq_end = seq_start + lengths[seq];
// Find the unconstrained window around b, which
// may be out of the sequence bounds.
int window_start = b - nW;
int window_end = b + nW + 1;
// Find the sequence-constrained window around b.
int d_seqs_start = max(seq_start, window_start);
int d_seqs_end = min(seq_end, window_end);
        // The update here proceeds differently from the other seq2col
        // implementations. We have to do all the updates for the b in this loop
        // iteration, otherwise we get data races due to parallelism in CUDA.
        //
        // A batch item b occurs, given nW=1, in:
//
// position 0 in b - 1 (if present) <- window_start
// position 1 in b
// position 2 in b + 1 (if present) <- window_end
//
// The following loop sums the gradients for those occurrences.
// b_w loops over [b - 1, b, b + 1] and computes the position
// of b within the column gradients of [b - 1 ... b + 1].
for (int b_w = d_seqs_start; b_w < d_seqs_end; ++b_w) {
int position = (2 * nW) - (b_w - window_start);
int start = (b_w * I * nF) + (position * I);
for (int i = 0; i < I; ++i) {
d_seqs[(b*I + i)] += d_cols[start + i];
}
}
}
}
template <typename T>
__global__ void backprop_clipped_linear(T* dX, const T* dY, const T* X, double slope, double offset, double min_val, double max_val, int N)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
T low = (min_val - offset) / slope;
T high = (max_val - offset) / slope;
for (int i = _loop_start; i < N; i += _loop_stride)
{
T x = X[i];
if (low < x && x < high) {
dX[i] = dY[i] * slope;
} else {
dX[i] = 0;
}
}
}
template <typename T>
__global__ void backprop_hard_swish(T* dX, const T* dY, const T* X, int N)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int i = _loop_start; i < N; i += _loop_stride)
{
if (X[i] > 2.5) {
dX[i] = dY[i];
} else if (X[i] < -2.5) {
dX[i] = 0;
} else {
dX[i] = dY[i] * (X[i] * 0.4 + 0.5);
}
}
}
template <typename T>
__global__ void backprop_hard_swish_mobilenet(T* dX, const T* dY, const T* X, int N)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int i = _loop_start; i < N; i += _loop_stride)
{
if (X[i] > 3.0) {
dX[i] = dY[i];
} else if (X[i] < -3.0) {
dX[i] = 0;
} else {
dX[i] = dY[i] * ((X[i] * 2.0 + 3.0) / 6.0);
}
}
}
template <typename T>
__global__ void backprop_dish(T* dX, const T* dY, const T* X, int N)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int i = _loop_start; i < N; i += _loop_stride)
{
T x = X[i];
T x_sq = x * x;
T x_sq_plus_one = x_sq + 1.0;
dX[i] = dY[i] * (x/sqrt(x_sq_plus_one) - (0.5 * x * x_sq)
            / pow(x_sq_plus_one, static_cast<T>(1.5)) + 0.5);
}
}
template <typename T>
__global__ void backprop_gelu(T* dX, const T* dY, const T* X,
double threshold, int N)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int i = _loop_start; i < N; i += _loop_stride)
{
T x = X[i];
if (x >= threshold) {
dX[i] = dY[i];
} else if (x <= -threshold) {
dX[i] = 0.0;
} else {
            T cdf = 0.5 * (1.0 + erf(Constants<T>::INV_SQRT_2 * x));
            T pdf = Constants<T>::INV_SQRT_2PI * exp(-0.5 * x * x);
dX[i] = dY[i] * (cdf + x * pdf);
}
}
}
template <typename T>
__global__ void backprop_maxout(T* dX,
const T* dY, const int* which, int B, int O, int P)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int b = _loop_start; b < B; b += _loop_stride)
{
// Go to the regions we're working on
T* dX_b = &dX[b*O*P];
const T* dY_b = &dY[b*O];
const int* which_b = &which[b*O];
for (int i=0; i < O; ++i)
dX_b[(i*P)+which_b[i]] = dY_b[i];
}
}
template <typename T>
__global__ void backprop_mish(T* dX,
const T* dY, const T* X, double threshold, int N)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int i = _loop_start; i < N; i += _loop_stride)
{
T x = X[i];
if (x >= threshold)
{
dX[i] = dY[i];
} else
{
T exp_x = exp(x);
T exp_2x = exp(2*x);
T exp_3x = exp(3*x);
T omega = (4. * (x+1)) + (4 * exp_2x) + exp_3x + exp_x * (4.*x+6);
T delta = 2 * exp_x + exp_2x + 2;
dX[i] = dY[i] * ((exp_x * omega) / (delta * delta));
}
}
}
template <typename T>
__global__ void backprop_swish(T* dX, const T* dY, const T* X,
const T* Y, double threshold, int N)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int i = _loop_start; i < N; i += _loop_stride)
{
T x = X[i];
T y = Y[i];
if (x >= threshold) {
dX[i] = dY[i];
} else if (x <= -threshold) {
dX[i] = 0.0;
} else {
T cdf = 1.0 / (1 + exp(-x));
T d = y + cdf * (1 - y);
dX[i] = dY[i] * d;
}
}
}
template <typename U>
__global__ void backprop_reduce_sum(U* dX, const U* d_sum, const int* lengths,
int B, int T, int O)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
int seq_start = 0;
int b = 0;
for (int t = _loop_start; t < T; t += _loop_stride)
{
// Find the sequence item we're working on
while ((b < B) && (seq_start+lengths[b]) <= t)
{
seq_start += lengths[b];
b += 1;
}
if (lengths[b] == 0)
continue;
for (int i=0; i < O; ++i)
{
dX[t * O + i] = d_sum[b * O + i];
}
}
}
template <typename U>
__global__ void backprop_reduce_mean(U* dX, const U* d_mean, const int* lengths,
int B, int T, int O)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
int seq_start = 0;
int b = 0;
for (int t = _loop_start; t < T; t += _loop_stride)
{
// Find the sequence item we're working on
while ((b < B) && (seq_start+lengths[b]) <= t)
{
seq_start += lengths[b];
b += 1;
}
if (lengths[b] == 0)
continue;
U* dX_t = &dX[t * O];
const U* d_mean_b = &d_mean[b * O];
int lengths_b = lengths[b];
for (int i=0; i < O; ++i)
{
dX_t[i] = d_mean_b[i] / lengths_b;
}
}
}
template <typename U>
__global__ void backprop_reduce_max(U* dX, const U* d_maxes,
const int* which, const int* lengths, int B, int T, int O)
{
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
int seq_start = 0;
int b = 0;
for (int t = _loop_start; t < T; t += _loop_stride)
{
// We're calculating the gradient of the unpooled sequences, from
// the gradient of the maxes. In this loop, we're getting the gradient
// of a single sequence item, t. We need to know the sequence index,
// b.
while ((b < B) && (seq_start+lengths[b]) <= t)
{
seq_start += lengths[b];
b += 1;
}
if (lengths[b] == 0)
continue;
// The "which" array tells us which rows were selected as the max.
// So we need to find the index of our t in the sequence.
int index_of_t = t-seq_start;
// Get the rows we're dealing with, to avoid cluttering the loop
// with the index math.
U* dX_t = &dX[t*O];
const U* d_maxes_b = &d_maxes[b*O];
const int* which_b = &which[b*O];
// Now loop over our row.
for (int i=0; i < O; ++i)
{
// If we used the value for this cell,
// pass the gradient
if (which_b[i] == index_of_t)
dX_t[i] = d_maxes_b[i];
}
}
}
thinc-release-v9.1.1/thinc/backends/_custom_kernels.py 0000664 0000000 0000000 00000061432 14670643317 0023101 0 ustar 00root root 0000000 0000000 import operator
import re
from collections import defaultdict
from functools import reduce
from pathlib import Path
from typing import Callable, Optional, Tuple
import numpy
from ..compat import cupy, has_cupy_gpu
PWD = Path(__file__).parent
KERNELS_SRC = (PWD / "_custom_kernels.cu").read_text(encoding="utf8")
KERNELS_LIST = [
"backprop_clipped_linear",
"backprop_clipped_linear",
"backprop_dish",
"backprop_dish",
"backprop_gelu",
"backprop_gelu",
"backprop_hard_swish",
"backprop_hard_swish",
"backprop_hard_swish_mobilenet",
"backprop_hard_swish_mobilenet",
"backprop_maxout",
"backprop_maxout",
"backprop_mish",
"backprop_mish",
"backprop_reduce_max",
"backprop_reduce_max",
"backprop_reduce_mean",
"backprop_reduce_mean",
"backprop_reduce_sum",
"backprop_reduce_sum",
"backprop_seq2col",
"backprop_seq2col",
"backprop_swish",
"backprop_swish",
"clipped_linear",
"clipped_linear",
"dish",
"dish",
"gather_add",
"gather_add",
"gelu",
"gelu",
"maxout",
"maxout",
"mish",
"mish",
"pad",
"pad",
"pad",
"pad",
"reduce_max",
"reduce_max",
"reduce_sum",
"reduce_sum",
"seq2col",
"seq2col",
"swish",
"swish",
]
KERNELS = (
cupy.RawModule(
code=KERNELS_SRC, options=("--std=c++11",), name_expressions=KERNELS_LIST
)
if has_cupy_gpu
else None
)
class LazyKernel:
"""Wraps around `cupy.RawModule` and `cupy.RawKernel` to verify CuPy availability
and lazily compile the latter on first invocation.
The default CuPy behaviour triggers the compilation as soon as the `cupy.RawKernel` object
is accessed."""
name: str
_kernel: Optional["cupy.RawKernel"]
_compile_callback: Optional[Callable[[], "cupy.RawKernel"]]
__slots__ = ["name", "_kernel", "_compile_callback"]
def __init__(
self,
name: str,
*,
compile_callback: Optional[Callable[[], "cupy.RawKernel"]] = None,
) -> None:
self.name = name
self._kernel = None
self._compile_callback = compile_callback
def __call__(self, *args, **kwargs):
self._compile_kernel()
self._kernel(*args, **kwargs)
def _compile_kernel(self):
if self._kernel is not None:
return
if self._compile_callback is not None:
self._kernel = self._compile_callback()
elif KERNELS is not None:
self._kernel = KERNELS.get_function(self.name)
if self._kernel is None:
raise ValueError(f"couldn't compile Cupy kernel '{self.name}'")
def compile_mmh():
if not has_cupy_gpu:
return None
return cupy.RawKernel((PWD / "_murmur3.cu").read_text(encoding="utf8"), "hash_data")
clipped_linear_kernel_float = LazyKernel("clipped_linear<float>")
clipped_linear_kernel_double = LazyKernel("clipped_linear<double>")
dish_kernel_float = LazyKernel("dish<float>")
dish_kernel_double = LazyKernel("dish<double>")
gather_add_kernel_float = LazyKernel("gather_add<float>")
gather_add_kernel_double = LazyKernel("gather_add<double>")
gelu_kernel_float = LazyKernel("gelu<float>")
gelu_kernel_double = LazyKernel("gelu<double>")
hash_data_kernel = LazyKernel("hash_data", compile_callback=compile_mmh)
maxout_kernel_float = LazyKernel("maxout<float>")
maxout_kernel_double = LazyKernel("maxout<double>")
mish_kernel_float = LazyKernel("mish<float>")
mish_kernel_double = LazyKernel("mish<double>")
pad_kernel_float = LazyKernel("pad<float>")
pad_kernel_double = LazyKernel("pad<double>")
pad_kernel_int32 = LazyKernel("pad<int>")
pad_kernel_int64 = LazyKernel("pad<long long>")
reduce_max_kernel_float = LazyKernel("reduce_max<float>")
reduce_max_kernel_double = LazyKernel("reduce_max<double>")
reduce_sum_kernel_float = LazyKernel("reduce_sum<float>")
reduce_sum_kernel_double = LazyKernel("reduce_sum<double>")
seq2col_kernel_float = LazyKernel("seq2col<float>")
seq2col_kernel_double = LazyKernel("seq2col<double>")
swish_kernel_float = LazyKernel("swish<float>")
swish_kernel_double = LazyKernel("swish<double>")
backprop_clipped_linear_kernel_double = LazyKernel("backprop_clipped_linear<double>")
backprop_clipped_linear_kernel_float = LazyKernel("backprop_clipped_linear<float>")
backprop_dish_kernel_double = LazyKernel("backprop_dish<double>")
backprop_dish_kernel_float = LazyKernel("backprop_dish<float>")
backprop_gelu_kernel_double = LazyKernel("backprop_gelu<double>")
backprop_gelu_kernel_float = LazyKernel("backprop_gelu<float>")
backprop_hard_swish_kernel_double = LazyKernel("backprop_hard_swish<double>")
backprop_hard_swish_kernel_float = LazyKernel("backprop_hard_swish<float>")
backprop_hard_swish_mobilenet_kernel_double = LazyKernel(
    "backprop_hard_swish_mobilenet<double>"
)
backprop_hard_swish_mobilenet_kernel_float = LazyKernel(
    "backprop_hard_swish_mobilenet<float>"
)
backprop_maxout_kernel_double = LazyKernel("backprop_maxout<double>")
backprop_maxout_kernel_float = LazyKernel("backprop_maxout<float>")
backprop_mish_kernel_double = LazyKernel("backprop_mish<double>")
backprop_mish_kernel_float = LazyKernel("backprop_mish<float>")
backprop_reduce_max_kernel_double = LazyKernel("backprop_reduce_max<double>")
backprop_reduce_max_kernel_float = LazyKernel("backprop_reduce_max<float>")
backprop_reduce_mean_kernel_double = LazyKernel("backprop_reduce_mean<double>")
backprop_reduce_mean_kernel_float = LazyKernel("backprop_reduce_mean<float>")
backprop_reduce_sum_kernel_double = LazyKernel("backprop_reduce_sum<double>")
backprop_reduce_sum_kernel_float = LazyKernel("backprop_reduce_sum<float>")
backprop_seq2col_kernel_double = LazyKernel("backprop_seq2col<double>")
backprop_seq2col_kernel_float = LazyKernel("backprop_seq2col<float>")
backprop_swish_kernel_double = LazyKernel("backprop_swish<double>")
backprop_swish_kernel_float = LazyKernel("backprop_swish<float>")
def _alloc(shape, dtype, *, zeros: bool = True):
if zeros:
return cupy.zeros(shape, dtype)
else:
return cupy.empty(shape, dtype)
def _alloc_like(array, zeros: bool = True):
if zeros:
return cupy.zeros_like(array)
else:
return cupy.empty_like(array)
def pad(seqs, round_to=1, *, threads_per_block=128, num_blocks=128):
if round_to < 1:
raise ValueError(f"Rounding for padding must at least be 1, was: {round_to}")
for seq in seqs:
_is_float_or_int_array(seq)
seq_lens = [len(seq) for seq in seqs]
max_seq_len = max(seq_lens)
# Round the length to nearest bucket -- helps on GPU, to make similar
# array sizes.
max_seq_len += -max_seq_len % round_to
seq_lens = cupy.array(seq_lens, dtype="int32")
final_shape = (len(seqs), max_seq_len) + seqs[0].shape[1:]
out = cupy.empty(final_shape, dtype=seqs[0].dtype)
# Extract pointers from CuPy arrays, so that we can address
# them in the CUDA kernel.
    ptrs = numpy.empty((len(seqs),), "int64")
for idx, seq in enumerate(seqs):
ptrs[idx] = seq.data.ptr
ptrs = cupy.array(ptrs)
stride = reduce(operator.mul, seqs[0].shape[1:], 1)
if out.dtype == "float32":
pad_kernel_float(
(num_blocks,),
(threads_per_block,),
(out, ptrs, seq_lens, stride, len(seqs), max_seq_len),
)
elif out.dtype == "float64":
pad_kernel_double(
(num_blocks,),
(threads_per_block,),
(out, ptrs, seq_lens, stride, len(seqs), max_seq_len),
)
elif out.dtype == "int32":
pad_kernel_int32(
(num_blocks,),
(threads_per_block,),
(out, ptrs, seq_lens, stride, len(seqs), max_seq_len),
)
elif out.dtype == "int64":
pad_kernel_int64(
(num_blocks,),
(threads_per_block,),
(out, ptrs, seq_lens, stride, len(seqs), max_seq_len),
)
return out
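# Usage sketch (requires a CUDA device; all sequences must share a dtype):
#
#     seqs = [cupy.ones((2, 3), dtype="float32"), cupy.ones((5, 3), dtype="float32")]
#     padded = pad(seqs, round_to=4)  # shape (2, 8, 3); rows past each length are zero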
def clipped_linear(
X,
*,
inplace=False,
slope=1.0,
offset=0.0,
min_val=0.0,
max_val=1.0,
threads_per_block=128,
num_blocks=128,
):
_is_float_array(X)
out = X
if not inplace:
out = _alloc_like(X, zeros=False)
if X.dtype == "float32":
clipped_linear_kernel_float(
(num_blocks,),
(threads_per_block,),
(out, X, slope, offset, min_val, max_val, X.size),
)
else:
clipped_linear_kernel_double(
(num_blocks,),
(threads_per_block,),
(out, X, slope, offset, min_val, max_val, X.size),
)
return out
def gather_add(table, indices, *, threads_per_block=128, num_blocks=128):
if table.ndim != 2:
raise ValueError(
f"gather_add expects table with dimensionality 2, was: {table.ndim}"
)
if indices.ndim != 2:
raise ValueError(
f"gather_add expects indices with dimensionality 2, was: {indices.ndim}"
)
_is_float_array(table)
indices = indices.astype("int32")
_check_indices(indices, table.shape[0])
B = indices.shape[0]
K = indices.shape[1]
T = table.shape[0]
O = table.shape[1]
out = _alloc((B, O), dtype=table.dtype, zeros=True)
if table.dtype == "float32":
gather_add_kernel_float(
(num_blocks,), (threads_per_block,), (out, table, indices, T, O, B, K)
)
else:
gather_add_kernel_double(
(num_blocks,), (threads_per_block,), (out, table, indices, T, O, B, K)
)
return out
def dish(X, *, inplace=False, threads_per_block=128, num_blocks=128):
_is_float_array(X)
out = X
if not inplace:
out = _alloc_like(X, zeros=False)
if X.dtype == "float32":
dish_kernel_float((num_blocks,), (threads_per_block,), (out, X, X.size))
else:
dish_kernel_double((num_blocks,), (threads_per_block,), (out, X, X.size))
return out
def gelu(X, *, inplace=False, threshold=6.0, threads_per_block=128, num_blocks=128):
_is_float_array(X)
out = X
if not inplace:
out = _alloc_like(X, zeros=False)
if X.dtype == "float32":
gelu_kernel_float(
(num_blocks,), (threads_per_block,), (out, X, threshold, X.size)
)
else:
gelu_kernel_double(
(num_blocks,), (threads_per_block,), (out, X, threshold, X.size)
)
return out
def check_seq2col_lengths(lengths, B):
if lengths is None:
lengths = cupy.array([B], dtype="int32")
else:
_check_lengths(lengths, B)
return lengths
def seq2col(seq, nW, *, lengths=None, threads_per_block=128, num_blocks=128):
_is_float_array(seq)
B = seq.shape[0]
nF = nW * 2 + 1
I = seq.shape[1]
lengths = check_seq2col_lengths(lengths, B)
nL = lengths.shape[0]
out = _alloc((B, I * nF), dtype=seq.dtype, zeros=True)
if seq.size != 0 and lengths.size != 0:
if seq.dtype == "float32":
seq2col_kernel_float(
(num_blocks,), (threads_per_block,), (out, seq, lengths, nW, B, I, nL)
)
else:
seq2col_kernel_double(
(num_blocks,), (threads_per_block,), (out, seq, lengths, nW, B, I, nL)
)
return out
def maxout(X, *, threads_per_block=128, num_blocks=128):
_is_float_array(X)
B, I, P = X.shape
out_shape = (B, I)
best = _alloc(out_shape, dtype=X.dtype, zeros=False)
which = _alloc(out_shape, dtype="i", zeros=False)
if X.dtype == "float32":
maxout_kernel_float(
(num_blocks,), (threads_per_block,), (best, which, X, B, I, P)
)
else:
maxout_kernel_double(
(num_blocks,), (threads_per_block,), (best, which, X, B, I, P)
)
return best, which
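# Usage sketch: X packs P candidate values per output unit; `which` records
# the winning candidate indices needed for the backward pass.
#
#     X = cupy.random.uniform(size=(8, 16, 3)).astype("float32")
#     best, which = maxout(X)  # both have shape (8, 16)
#     dX = backprop_maxout(cupy.ones_like(best), which, 3)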
def mish(X, *, inplace=False, threshold=5, threads_per_block=128, num_blocks=128):
_is_float_array(X)
out = X
if not inplace:
out = _alloc_like(X, zeros=False)
if X.dtype == "float32":
mish_kernel_float(
(num_blocks,), (threads_per_block,), (out, X, threshold, X.size)
)
else:
mish_kernel_double(
(num_blocks,), (threads_per_block,), (out, X, threshold, X.size)
)
return out
def reduce_sum(X, lengths, *, threads_per_block=128, num_blocks=128):
_is_float_array(X)
B = len(lengths)
T = X.shape[0]
O = X.shape[1]
_check_lengths(lengths, T)
out = _alloc((B, O), dtype=X.dtype, zeros=True)
if X.dtype == "float32":
reduce_sum_kernel_float(
(num_blocks,), (threads_per_block,), (out, X, lengths, B, T, O)
)
else:
reduce_sum_kernel_double(
(num_blocks,), (threads_per_block,), (out, X, lengths, B, T, O)
)
return out
def reduce_mean(X, lengths, *, threads_per_block=128, num_blocks=128):
_is_float_array(X)
B = len(lengths)
T = X.shape[0]
O = X.shape[1]
_check_lengths(lengths, T)
out = _alloc((B, O), dtype=X.dtype, zeros=True)
if X.dtype == "float32":
reduce_sum_kernel_float(
(num_blocks,), (threads_per_block,), (out, X, lengths, B, T, O)
)
else:
reduce_sum_kernel_double(
(num_blocks,), (threads_per_block,), (out, X, lengths, B, T, O)
)
# Avoid divide by zero
out /= lengths.reshape((-1, 1)) + 1e-10
return out
def reduce_max(X, lengths, *, threads_per_block=128, num_blocks=128):
_is_float_array(X)
B = len(lengths)
T = X.shape[0]
O = X.shape[1]
_check_lengths(lengths, T, min_length=1)
out_shape = (B, O)
maxes = _alloc(out_shape, dtype=X.dtype, zeros=False)
which = _alloc(out_shape, dtype="i", zeros=False)
if X.dtype == "float32":
reduce_max_kernel_float(
(num_blocks,), (threads_per_block,), (maxes, which, X, lengths, B, T, O)
)
else:
reduce_max_kernel_double(
(num_blocks,), (threads_per_block,), (maxes, which, X, lengths, B, T, O)
)
return maxes, which
def swish(X, *, inplace=False, threshold=17.0, threads_per_block=128, num_blocks=128):
_is_float_array(X)
out = X
if not inplace:
out = _alloc_like(X, zeros=False)
if X.dtype == "float32":
swish_kernel_float(
(num_blocks,), (threads_per_block,), (out, X, threshold, X.size)
)
else:
swish_kernel_double(
(num_blocks,), (threads_per_block,), (out, X, threshold, X.size)
)
return out
def backprop_seq2col(dY, nW, *, lengths=None, threads_per_block=128, num_blocks=128):
_is_float_array(dY)
B = dY.shape[0]
nF = nW * 2 + 1
I = dY.shape[1] // nF
lengths = check_seq2col_lengths(lengths, B)
nL = lengths.shape[0]
out = _alloc((B, I), dtype=dY.dtype, zeros=True)
if dY.size != 0 and lengths.size != 0:
if dY.dtype == "float32":
backprop_seq2col_kernel_float(
(num_blocks,), (threads_per_block,), (out, dY, lengths, nW, B, I, nL)
)
else:
backprop_seq2col_kernel_double(
(num_blocks,), (threads_per_block,), (out, dY, lengths, nW, B, I, nL)
)
return out
def backprop_clipped_linear(
dY,
X,
*,
slope: float = 1.0,
offset: float = 0.0,
min_val: float = 0.0,
max_val: float = 1.0,
inplace: bool = False,
threads_per_block=128,
num_blocks=128,
):
_is_float_array(dY)
_is_float_array(X, shape=dY.shape)
out = dY
if not inplace:
out = _alloc_like(dY, zeros=False)
if dY.dtype == "float32":
backprop_clipped_linear_kernel_float(
(num_blocks,),
(threads_per_block,),
(out, dY, X, slope, offset, min_val, max_val, out.size),
)
else:
backprop_clipped_linear_kernel_double(
(num_blocks,),
(threads_per_block,),
(out, dY, X, slope, offset, min_val, max_val, out.size),
)
return out
def backprop_hard_swish(
dY, X, *, inplace: bool = False, threads_per_block=128, num_blocks=128
):
_is_float_array(dY)
_is_float_array(X, shape=dY.shape)
out = dY
if not inplace:
out = _alloc_like(dY, zeros=False)
if dY.dtype == "float32":
backprop_hard_swish_kernel_float(
(num_blocks,), (threads_per_block,), (out, dY, X, out.size)
)
else:
backprop_hard_swish_kernel_double(
(num_blocks,), (threads_per_block,), (out, dY, X, out.size)
)
return out
def backprop_hard_swish_mobilenet(
dY, X, *, inplace: bool = False, threads_per_block=128, num_blocks=128
):
_is_float_array(dY)
_is_float_array(X, shape=dY.shape)
out = dY
if not inplace:
out = _alloc_like(dY, zeros=False)
if dY.dtype == "float32":
backprop_hard_swish_mobilenet_kernel_float(
(num_blocks,), (threads_per_block,), (out, dY, X, out.size)
)
else:
backprop_hard_swish_mobilenet_kernel_double(
(num_blocks,), (threads_per_block,), (out, dY, X, out.size)
)
return out
def backprop_dish(
dY,
X,
*,
inplace: bool = False,
threads_per_block=128,
num_blocks=128,
):
_is_float_array(dY)
_is_float_array(X, shape=dY.shape)
out = dY
if not inplace:
out = _alloc_like(dY, zeros=False)
if dY.dtype == "float32":
backprop_dish_kernel_float(
(num_blocks,), (threads_per_block,), (out, dY, X, out.size)
)
else:
backprop_dish_kernel_double(
(num_blocks,), (threads_per_block,), (out, dY, X, out.size)
)
return out
def backprop_gelu(
dY,
X,
*,
inplace: bool = False,
threshold=6.0,
threads_per_block=128,
num_blocks=128,
):
_is_float_array(dY)
_is_float_array(X, shape=dY.shape)
out = dY
if not inplace:
out = _alloc_like(dY, zeros=False)
if dY.dtype == "float32":
backprop_gelu_kernel_float(
(num_blocks,), (threads_per_block,), (out, dY, X, threshold, out.size)
)
else:
backprop_gelu_kernel_double(
(num_blocks,), (threads_per_block,), (out, dY, X, threshold, out.size)
)
return out
def backprop_maxout(dY, which, P, *, threads_per_block=128, num_blocks=128):
_is_float_array(dY)
B = dY.shape[0]
I = dY.shape[1]
out = _alloc((B, I, P), dtype=dY.dtype, zeros=True)
_check_which_maxout(which, B, I, P)
if dY.dtype == "float32":
backprop_maxout_kernel_float(
(num_blocks,), (threads_per_block,), (out, dY, which, B, I, P)
)
else:
backprop_maxout_kernel_double(
(num_blocks,), (threads_per_block,), (out, dY, which, B, I, P)
)
return out
def backprop_mish(
dY, X, *, inplace: bool = False, threshold=5, threads_per_block=128, num_blocks=128
):
_is_float_array(dY)
_is_float_array(X, shape=dY.shape)
out = dY
if not inplace:
out = _alloc_like(dY, zeros=False)
if dY.dtype == "float32":
backprop_mish_kernel_float(
(num_blocks,), (threads_per_block,), (out, dY, X, threshold, dY.size)
)
else:
backprop_mish_kernel_double(
(num_blocks,), (threads_per_block,), (out, dY, X, threshold, dY.size)
)
return out
def backprop_reduce_sum(d_sums, lengths, *, threads_per_block=128, num_blocks=128):
_is_float_array(d_sums)
B = len(lengths)
T = int(lengths.sum())
O = d_sums.shape[1]
_check_lengths(lengths, T)
out = _alloc((T, O), dtype=d_sums.dtype, zeros=False)
if d_sums.dtype == "float32":
backprop_reduce_sum_kernel_float(
(num_blocks,), (threads_per_block,), (out, d_sums, lengths, B, T, O)
)
else:
backprop_reduce_sum_kernel_double(
(num_blocks,), (threads_per_block,), (out, d_sums, lengths, B, T, O)
)
return out
def backprop_reduce_mean(d_means, lengths, *, threads_per_block=128, num_blocks=128):
_is_float_array(d_means)
B = len(lengths)
T = int(lengths.sum())
O = d_means.shape[1]
_check_lengths(lengths, T)
out = _alloc((T, O), dtype=d_means.dtype, zeros=False)
if d_means.dtype == "float32":
backprop_reduce_mean_kernel_float(
(num_blocks,), (threads_per_block,), (out, d_means, lengths, B, T, O)
)
else:
backprop_reduce_mean_kernel_double(
(num_blocks,), (threads_per_block,), (out, d_means, lengths, B, T, O)
)
return out
def backprop_reduce_max(
d_maxes, which, lengths, *, threads_per_block=128, num_blocks=128
):
_is_float_array(d_maxes)
B = len(lengths)
T = int(lengths.sum())
O = d_maxes.shape[1]
_check_lengths(lengths, T, min_length=1)
out = _alloc((T, O), dtype=d_maxes.dtype, zeros=True)
_check_which_reduce_max(which, (B, O), lengths)
if d_maxes.dtype == "float32":
backprop_reduce_max_kernel_float(
(num_blocks,), (threads_per_block,), (out, d_maxes, which, lengths, B, T, O)
)
else:
backprop_reduce_max_kernel_double(
(num_blocks,), (threads_per_block,), (out, d_maxes, which, lengths, B, T, O)
)
return out
def backprop_swish(
dY, X, Y, *, inplace=False, threshold=17.0, threads_per_block=128, num_blocks=128
):
_is_float_array(dY)
_is_float_array(X, shape=dY.shape)
_is_float_array(Y, shape=dY.shape)
out = dY
if not inplace:
out = _alloc_like(dY, zeros=False)
if dY.dtype == "float32":
backprop_swish_kernel_float(
(num_blocks,), (threads_per_block,), (out, dY, X, Y, threshold, out.size)
)
else:
backprop_swish_kernel_double(
(num_blocks,), (threads_per_block,), (out, dY, X, Y, threshold, out.size)
)
return out
def hash(ids, seed, *, threads_per_block=128, num_blocks=128):
out = _alloc((ids.shape[0], 4), dtype="uint32", zeros=True)
# sizeof(uint32_t) * 4
out_size = 4 * 4
in_size = 8 # sizeof(uint64_t)
# T = ids.shape[0]
hash_data_kernel(
(num_blocks,),
(threads_per_block,),
(out, ids, out_size, in_size, ids.shape[0], seed),
)
return out
def _is_float_array(out, *, shape: Optional[Tuple] = None):
assert out.dtype in (
"float32",
"float64",
), "CUDA kernel can only handle float32 and float64"
if shape is not None and out.shape != shape:
msg = f"array has incorrect shape, expected: {shape}, was: {out.shape}"
raise ValueError(msg)
def _is_float_or_int_array(out, *, shape: Optional[Tuple] = None):
assert out.dtype in (
"float32",
"float64",
"int32",
"int64",
), "CUDA kernel can only handle float32, float64, int32 and int64"
if shape is not None and out.shape != shape:
msg = f"array has incorrect shape, expected: {shape}, was: {out.shape}"
raise ValueError(msg)
def _check_lengths(lengths, n_elems: int, *, min_length=0):
assert lengths.dtype == "int32", "lengths should be encoded as 32-bit integers"
if not cupy.all(lengths >= min_length):
raise ValueError(f"all sequence lengths must be >= {min_length}")
if cupy.sum(lengths) != n_elems:
raise IndexError("lengths must sum up to the batch size")
def _check_indices(indices, n: int):
assert indices.dtype == "int32", "indices should be encoded as 32-bit integers"
if not _values_within_range(indices, 0, n):
raise IndexError(f"index out of bounds, must be >= 0 && < {n}")
def _check_which_maxout(which, B: int, I: int, P: int):
shape = (B, I)
msg = "maximum index (which) should be encoded as 32-bit integers"
assert which.dtype == "int32", msg
if which.shape != shape:
msg = f"maximum index (which) has incorrect shape, expected: {shape}, was: {which.shape}"
raise ValueError(msg)
if not _values_within_range(which, 0, P):
raise IndexError("maximum index (which) value out of bounds")
_values_within_range = (
cupy.ReductionKernel(
"T x, T lower, T upper",
"bool r",
"x >= lower && x < upper",
"a & b",
"r = a",
"true",
"within_range",
)
if has_cupy_gpu
else None
)
def _check_which_reduce_max(which, shape: Tuple, lengths):
msg = "maximum index (which) should be encoded as 32-bit integers"
assert which.dtype == "int32", msg
if which.shape != shape:
msg = f"maximum index (which) has incorrect shape, expected: {shape}, was: {which.shape}"
raise ValueError(msg)
if not cupy.all((which >= 0) & (which < cupy.expand_dims(lengths, -1))):
raise IndexError("maximum index (which) value out of bounds")
thinc-release-v9.1.1/thinc/backends/_murmur3.cu 0000664 0000000 0000000 00000012132 14670643317 0021426 0 ustar 00root root 0000000 0000000 //-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.
/*
* This version is taken from https://github.com/PeterScott/murmur3
* and modified to work with CUDA.
*/
// Including stdint.h is a pain in cupy, so just put the declarations in.
// Beware that long int is not 64bit on all platforms!
// e.g. Windows requires a 'long long' to get to 64 bit.
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
typedef signed char int8_t;
typedef int int32_t;
const unsigned long int test_var = 0;
const int size = sizeof(test_var);
#if size == 64
typedef unsigned long int uint64_t;
typedef long int int64_t;
#else
typedef unsigned long long uint64_t;
typedef long long int64_t;
#endif
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
#define FORCE_INLINE
__device__ static inline FORCE_INLINE uint64_t rotl64 ( uint64_t x, int8_t r )
{
return (x << r) | (x >> (64 - r));
}
#define ROTL64(x,y) rotl64(x,y)
#define BIG_CONSTANT(x) (x##LLU)
//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here
#define getblock(p, i) (p[i])
//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche
__device__ static inline FORCE_INLINE uint32_t fmix32 ( uint32_t h )
{
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
//----------
__device__ static inline FORCE_INLINE uint64_t fmix64 ( uint64_t k )
{
k ^= k >> 33;
k *= BIG_CONSTANT(0xff51afd7ed558ccd);
k ^= k >> 33;
k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return k;
}
//-----------------------------------------------------------------------------
__device__ void MurmurHash3_x64_128 ( const void * key, const int len,
const uint32_t seed, void * out )
{
const uint8_t * data = (const uint8_t*)key;
const int nblocks = len / 16;
int i;
uint64_t h1 = seed;
uint64_t h2 = seed;
uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
//----------
// body
const uint64_t * blocks = (const uint64_t *)(data);
for(i = 0; i < nblocks; i++)
{
uint64_t k1 = getblock(blocks,i*2+0);
uint64_t k2 = getblock(blocks,i*2+1);
k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;
k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
}
//----------
// tail
const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
uint64_t k1 = 0;
uint64_t k2 = 0;
switch(len & 15)
{
case 15: k2 ^= (uint64_t)(tail[14]) << 48;
case 14: k2 ^= (uint64_t)(tail[13]) << 40;
case 13: k2 ^= (uint64_t)(tail[12]) << 32;
case 12: k2 ^= (uint64_t)(tail[11]) << 24;
case 11: k2 ^= (uint64_t)(tail[10]) << 16;
case 10: k2 ^= (uint64_t)(tail[ 9]) << 8;
case 9: k2 ^= (uint64_t)(tail[ 8]) << 0;
k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
case 8: k1 ^= (uint64_t)(tail[ 7]) << 56;
case 7: k1 ^= (uint64_t)(tail[ 6]) << 48;
case 6: k1 ^= (uint64_t)(tail[ 5]) << 40;
case 5: k1 ^= (uint64_t)(tail[ 4]) << 32;
case 4: k1 ^= (uint64_t)(tail[ 3]) << 24;
case 3: k1 ^= (uint64_t)(tail[ 2]) << 16;
case 2: k1 ^= (uint64_t)(tail[ 1]) << 8;
case 1: k1 ^= (uint64_t)(tail[ 0]) << 0;
k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
};
//----------
// finalization
h1 ^= len; h2 ^= len;
h1 += h2;
h2 += h1;
h1 = fmix64(h1);
h2 = fmix64(h2);
h1 += h2;
h2 += h1;
((uint64_t*)out)[0] = h1;
((uint64_t*)out)[1] = h2;
}
//-----------------------------------------------------------------------------
// Write a stream of hash values for an input stream. Each output may have up to 128 bits
// of entropy. Input size should be specified in bytes.
extern "C" __global__
void hash_data(char* dest,
const char* src, size_t out_size, size_t in_size, size_t n_items, uint32_t seed)
{
char entropy[16]; // 128/8=16
int _loop_start = blockIdx.x * blockDim.x + threadIdx.x;
int _loop_stride = blockDim.x * gridDim.x;
for (int i = _loop_start; i < n_items; i += _loop_stride)
{
const char* src_i = &src[i*in_size];
char* dest_i = &dest[i*out_size];
MurmurHash3_x64_128(src_i, in_size, seed, entropy);
for (int j=0; j < out_size; ++j)
dest_i[j] = entropy[j];
}
}
thinc-release-v9.1.1/thinc/backends/_param_server.py 0000664 0000000 0000000 00000005264 14670643317 0022533 0 ustar 00root root 0000000 0000000 from typing import Any, Dict, Optional, Tuple
from ..types import FloatsXd
from ..util import get_array_module
KeyT = Tuple[int, str]
class ParamServer:
"""Serve parameters for a single process."""
_params: Dict[KeyT, FloatsXd] = {}
_grads: Dict[KeyT, FloatsXd] = {}
proxy: Optional[Any]
def __init__(
self,
params: Dict[KeyT, FloatsXd] = {},
grads: Dict[KeyT, FloatsXd] = {},
*,
proxy=None
):
self._params = dict(params)
self._grads = dict(grads)
# Allow a 'proxy' to be provided to support remote parameters. This
# is experimental, it's the mechanism we use in the Ray integration.
self.proxy = proxy
@property
def param_keys(self) -> Tuple[KeyT, ...]:
"""Get the names of registered parameter (including unset)."""
return tuple(self._params.keys())
@property
    def grad_keys(self) -> Tuple[KeyT, ...]:
        """Get the names of the parameters that have a gradient set."""
        return tuple([key for key in self.param_keys if self.has_grad(*key)])
def has_param(self, model_id: int, name: str) -> bool:
return (model_id, name) in self._params
def has_grad(self, model_id: int, name: str) -> bool:
return (model_id, name) in self._grads
def get_param(self, model_id: int, name: str) -> FloatsXd:
key = (model_id, name)
if self.proxy is not None:
self._params[key] = self.proxy.get_param(model_id, name)
return self._params[key]
def get_grad(self, model_id: int, name: str) -> FloatsXd:
key = (model_id, name)
return self._grads[key]
def set_param(self, model_id: int, name: str, value: FloatsXd) -> None:
if self.proxy is not None:
self.proxy.set_param(model_id, name, value)
self._params[(model_id, name)] = value
def set_grad(self, model_id: int, name: str, value: FloatsXd) -> None:
if self.proxy is not None:
self.proxy.set_grad(model_id, name, value)
else:
self._grads[(model_id, name)] = value
def inc_grad(self, model_id: int, name: str, value: FloatsXd) -> None:
key = (model_id, name)
if self.proxy is not None:
self.proxy.inc_grad(model_id, name, value)
elif not self.has_grad(model_id, name): # pragma: no cover
if hasattr(value, "copy"):
# Adjustment for Jax
self._grads[key] = value.copy()
elif not value.flags["C_CONTIGUOUS"]:
xp = get_array_module(value)
self._grads[(model_id, name)] = xp.ascontiguousarray(value)
else:
self._grads[(model_id, name)] = value
else:
self._grads[(model_id, name)] += value
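# Minimal sketch of the local (no proxy) workflow; parameters and gradients
# are always keyed by the (model_id, name) pair:
#
#     import numpy
#     ps = ParamServer()
#     ps.set_param(1, "W", numpy.zeros((2, 2), dtype="f"))
#     ps.inc_grad(1, "W", numpy.ones((2, 2), dtype="f"))
#     assert ps.has_grad(1, "W")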
thinc-release-v9.1.1/thinc/backends/apple_ops.pyx 0000664 0000000 0000000 00000001764 14670643317 0022061 0 ustar 00root root 0000000 0000000 from typing import Optional
import numpy
from ._accelerate import gemm
from ._accelerate cimport saxpy, sgemm
from .cblas cimport CBlas, set_saxpy, set_sgemm
from .. import registry
from ..types import Floats2d
from .numpy_ops import NumpyOps
@registry.ops("AppleOps")
class AppleOps(NumpyOps):
"""Thinc Ops class that calls into Apple's native libraries for some
operations. Other operations fall back to numpy."""
name = "apple"
xp = numpy
def cblas(self) -> CBlas:
cdef CBlas cblas = CBlas()
set_saxpy(cblas, saxpy)
set_sgemm(cblas, sgemm)
return cblas
def gemm(
self,
x: Floats2d,
y: Floats2d,
out: Optional[Floats2d] = None,
trans1: bool = False,
trans2: bool = False,
) -> Floats2d:
"""Perform General Matrix Multiplication (GeMM) and optionally store
the result in the specified output variable.
"""
return gemm(x, y, out=out, trans1=trans1, trans2=trans2)
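# Usage sketch (only meaningful on macOS builds where this extension is
# compiled; the class is also available via get_ops("apple")):
#
#     ops = AppleOps()
#     C = ops.gemm(numpy.ones((4, 3), dtype="f"), numpy.ones((3, 5), dtype="f"))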
thinc-release-v9.1.1/thinc/backends/cblas.pxd 0000664 0000000 0000000 00000003752 14670643317 0021135 0 ustar 00root root 0000000 0000000 from libcpp.memory cimport shared_ptr
ctypedef void (*sgemm_ptr)(bint transA, bint transB, int M, int N, int K,
float alpha, const float* A, int lda, const float* B,
int ldb, float beta, float* C, int ldc) nogil
ctypedef void (*dgemm_ptr)(bint transA, bint transB, int M, int N, int K,
double alpha, const double* A, int lda, const double* B,
int ldb, double beta, double* C, int ldc) nogil
ctypedef void (*saxpy_ptr)(int N, float alpha, const float* X, int incX,
float *Y, int incY) nogil
ctypedef void (*daxpy_ptr)(int N, double alpha, const double* X, int incX,
double *Y, int incY) nogil
ctypedef void (*sscal_ptr)(int N, float alpha, float* X, int incX) nogil
ctypedef void (*dscal_ptr)(int N, double alpha, double* X, int incX) nogil
# Forward-declaration of the BlasFuncs struct. This struct must be opaque, so
# that consumers of the CBlas class cannot become dependent on its size or
# ordering.
cdef struct BlasFuncs
cdef class CBlas:
cdef shared_ptr[BlasFuncs] ptr
# Note: the following functions are intentionally standalone. If we make them
# methods of CBlas, Cython will generate and use a vtable. This makes it
# impossible to add new BLAS functions later without breaking the ABI.
#
# See https://github.com/explosion/thinc/pull/700 for more information.
cdef daxpy_ptr daxpy(CBlas cblas) nogil
cdef saxpy_ptr saxpy(CBlas cblas) nogil
cdef sgemm_ptr sgemm(CBlas cblas) nogil
cdef dgemm_ptr dgemm(CBlas cblas) nogil
cdef sscal_ptr sscal(CBlas cblas) nogil
cdef dscal_ptr dscal(CBlas cblas) nogil
cdef void set_daxpy(CBlas cblas, daxpy_ptr daxpy) nogil
cdef void set_saxpy(CBlas cblas, saxpy_ptr saxpy) nogil
cdef void set_sgemm(CBlas cblas, sgemm_ptr sgemm) nogil
cdef void set_dgemm(CBlas cblas, dgemm_ptr dgemm) nogil
cdef void set_sscal(CBlas cblas, sscal_ptr sscal) nogil
cdef void set_dscal(CBlas cblas, dscal_ptr dscal) nogil
thinc-release-v9.1.1/thinc/backends/cblas.pyx 0000664 0000000 0000000 00000004034 14670643317 0021154 0 ustar 00root root 0000000 0000000 # cython: profile=False
cimport blis.cy
from cython.operator cimport dereference as deref
from libcpp.memory cimport make_shared
# Single- and double-precision wrappers for `blis.cy.scalv`
cdef void blis_sscal(int N, float alpha, float* X, int incX) nogil:
blis.cy.scalv(blis.cy.NO_CONJUGATE, N, alpha, X, incX)
cdef void blis_dscal(int N, double alpha, double* X, int incX) nogil:
blis.cy.scalv(blis.cy.NO_CONJUGATE, N, alpha, X, incX)
cdef struct BlasFuncs:
daxpy_ptr daxpy
saxpy_ptr saxpy
sgemm_ptr sgemm
dgemm_ptr dgemm
sscal_ptr sscal
dscal_ptr dscal
cdef class CBlas:
__slots__ = []
def __init__(self):
"""Construct a CBlas instance set to use BLIS implementations of the
supported BLAS functions."""
cdef BlasFuncs funcs
funcs.daxpy = blis.cy.daxpy
funcs.saxpy = blis.cy.saxpy
funcs.sgemm = blis.cy.sgemm
funcs.dgemm = blis.cy.dgemm
funcs.sscal = blis_sscal
funcs.dscal = blis_dscal
self.ptr = make_shared[BlasFuncs](funcs)
cdef daxpy_ptr daxpy(CBlas cblas) nogil:
return deref(cblas.ptr).daxpy
cdef saxpy_ptr saxpy(CBlas cblas) nogil:
return deref(cblas.ptr).saxpy
cdef sgemm_ptr sgemm(CBlas cblas) nogil:
return deref(cblas.ptr).sgemm
cdef dgemm_ptr dgemm(CBlas cblas) nogil:
return deref(cblas.ptr).dgemm
cdef sscal_ptr sscal(CBlas cblas) nogil:
return deref(cblas.ptr).sscal
cdef dscal_ptr dscal(CBlas cblas) nogil:
return deref(cblas.ptr).dscal
cdef void set_daxpy(CBlas cblas, daxpy_ptr daxpy) nogil:
deref(cblas.ptr).daxpy = daxpy
cdef void set_saxpy(CBlas cblas, saxpy_ptr saxpy) nogil:
deref(cblas.ptr).saxpy = saxpy
cdef void set_sgemm(CBlas cblas, sgemm_ptr sgemm) nogil:
deref(cblas.ptr).sgemm = sgemm
cdef void set_dgemm(CBlas cblas, dgemm_ptr dgemm) nogil:
deref(cblas.ptr).dgemm = dgemm
cdef void set_sscal(CBlas cblas, sscal_ptr sscal) nogil:
deref(cblas.ptr).sscal = sscal
cdef void set_dscal(CBlas cblas, dscal_ptr dscal) nogil:
deref(cblas.ptr).dscal = dscal
thinc-release-v9.1.1/thinc/backends/cpu_kernels.hh 0000664 0000000 0000000 00000034411 14670643317 0022163 0 ustar 00root root 0000000 0000000 #ifndef CPU_KERNELS_HH
#define CPU_KERNELS_HH
#include <cassert>
#include <cmath>
#include <cstring>
#include <stdexcept>
#include <string>
#include <type_traits>
// Ideally we'd use an alias declaration for a generic definition of
// *axpy. But Cython doesn't support alias declarations yet:
//
// https://github.com/cython/cython/issues/3272
//
// template <typename T>
// using axpy = void (*)(int N, T alpha, const T* X, int incX,
// T *Y, int incY);
//
// So, instead we'll do this the pre-C++11 way:
template <typename T>
struct axpy {
typedef void (*ptr)(int N, T alpha, const T* X, int incX, T *Y, int incY);
};
// All elementwise functions, such as most activations, work in-place.
template <typename T, typename L>
struct argmax_result {
T max;
L max_idx;
};
template <typename T, typename L>
argmax_result<T, L> argmax(T const *arr, L len)
{
    static_assert(std::is_floating_point<T>::value,
                  "Array should be floating point");
    static_assert(std::is_integral<L>::value, "Array length should be integral");
    argmax_result<T, L> r { arr[0], 0 };
for (L i = 1; i < len; ++i) {
if (arr[i] > r.max) {
r.max = arr[i];
r.max_idx = i;
}
}
return r;
}
// The next two templates define argmax for a fixed number of elements.
template <typename T>
argmax_result<T, int> argmax(T a) {
    static_assert(std::is_floating_point<T>::value, "Argument should be floating point");
    argmax_result<T, int> acc { a, 0 };
return acc;
}
template <typename T, typename... Args>
argmax_result<T, int> argmax(T a, Args... args) {
    static_assert(std::is_floating_point<T>::value, "Arguments should be floating point");
auto acc = argmax(args...);
if (acc.max > a) {
acc.max_idx += 1;
} else {
acc.max_idx = 0;
acc.max = a;
}
return acc;
}
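// Example: both the array and fixed-arity overloads report the winning value
// and its index, e.g.
//
//     float xs[4] = {0.1f, 0.7f, 0.3f, 0.2f};
//     auto r1 = argmax(xs, 4);            // r1.max == 0.7f, r1.max_idx == 1
//     auto r2 = argmax(0.1f, 0.7f, 0.3f); // r2.max == 0.7f, r2.max_idx == 1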
template <typename A, typename L>
void vec_add(A* X, const A* Y, A scale, L N)
{
    static_assert(std::is_floating_point<A>::value,
                  "Array should be floating point");
    static_assert(std::is_integral<L>::value, "Array length should be integral");
for (L i = 0; i < N; ++i)
X[i] += scale * Y[i];
}
template <typename A, typename L>
void cpu_maxout(A* best__bo, L* which__bo, const A* cands__bop, L B, L O, L P)
{
    static_assert(std::is_floating_point<A>::value,
                  "Array should be floating point");
    static_assert(std::is_integral<L>::value, "Array length should be integral");
// For small inputs, we use an unrolled argmax.
if (P == 2) {
for (int i = 0; i < B * O; ++i) {
A const *input = cands__bop + i * P;
auto r = argmax(input[0], input[1]);
which__bo[i] = r.max_idx;
best__bo[i] = r.max;
}
} else if (P == 3) {
for (int i = 0; i < B * O; ++i) {
A const *input = cands__bop + i * P;
auto r = argmax(input[0], input[1], input[2]);
which__bo[i] = r.max_idx;
best__bo[i] = r.max;
}
} else {
for (int i = 0; i < B * O; ++i) {
auto r = argmax(cands__bop + i * P, P);
which__bo[i] = r.max_idx;
best__bo[i] = r.max;
}
}
}
template <typename A, typename L>
void cpu_backprop_maxout(A* dX__bop, const A* dX__bo, const L* which__bo,
                         L B, L O, L P)
{
    static_assert(std::is_floating_point<A>::value,
                  "Array should be floating point");
    static_assert(std::is_integral<L>::value, "Array length should be integral");
for (L b = 0; b < B; ++b) {
for (L o = 0; o < O; ++o) {
if (*which__bo >= P) {
throw std::out_of_range(std::string("index ") + std::to_string(*which__bo) + " is out of bounds for maxout with size " + std::to_string(P));
}
dX__bop[*which__bo] = *dX__bo;
dX__bop += P;
dX__bo += 1;
which__bo += 1;
}
}
}
template <typename A, typename L>
void cpu_reduce_max(A* maxes__bo, L* which__bo, const A* X__to,
                    const L* lengths__b, L B, L T, L O)
{
    static_assert(std::is_floating_point<A>::value,
                  "Array should be floating point");
    static_assert(std::is_integral<L>::value, "Array length should be integral");
for (const L* length = lengths__b; length < lengths__b + B; ++length) {
if (*length <= 0)
throw std::invalid_argument(std::string("all sequence lengths must be > 0, was: ") + std::to_string(*length));
else if (*length > T) {
throw std::out_of_range("lengths must sum up to the number of rows");
}
T -= *length;
std::memcpy(maxes__bo, X__to, O * sizeof(*maxes__bo));
X__to += O;
for (L i = 1; i < *length; ++i) {
for (L j = 0; j < O; ++j) {
if (X__to[j] > maxes__bo[j]) {
maxes__bo[j] = X__to[j];
which__bo[j] = i;
}
}
X__to += O;
}
maxes__bo += O;
which__bo += O;
}
}
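// Worked example (illustrative only): two sequences of lengths {2, 1} over
// O=2 columns, so X__to has T=3 rows. Note which__bo must be
// zero-initialised by the caller, since only improvements over row 0 are
// written:
//
//   float X[6] = {1.f, 4.f,     // seq 0, row 0
//                 3.f, 2.f,     // seq 0, row 1
//                 5.f, 6.f};    // seq 1, row 0
//   int lengths[2] = {2, 1};
//   float maxes[4];
//   int which[4] = {0, 0, 0, 0};
//   cpu_reduce_max(maxes, which, X, lengths, 2, 3, 2);
//   // maxes == {3, 4, 5, 6}, which == {1, 0, 0, 0}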
template <typename A, typename L>
void cpu_backprop_reduce_max(A* dX__to, const A* d_maxes__bo, const L* which__bo,
                             const L* lengths__b, L B, L T, L O)
{
    static_assert(std::is_floating_point<A>::value,
                  "Array should be floating point");
    static_assert(std::is_integral<L>::value, "Array length should be integral");
for (const L* length = lengths__b; length < lengths__b + B; ++length) {
for (L i = 0; i < O; ++i) {
L item = which__bo[i];
if (item >= *length) {
throw std::out_of_range(std::string("index ") + std::to_string(item) + " is out of bounds for maxout with length " + std::to_string(*length));
}
dX__to[item * O + i] = d_maxes__bo[i];
}
dX__to += *length * O;
d_maxes__bo += O;
which__bo += O;
}
}
template <typename A, typename L>
void cpu_reduce_mean(A* means__bo, const A* X__to, const L* lengths__b,
                     L B, L T, L O)
{
    static_assert(std::is_floating_point<A>::value,
                  "Array should be floating point");
    static_assert(std::is_integral<L>::value, "Array length should be integral");
for (const L* length = lengths__b; length < lengths__b + B; ++length) {
if (*length < 0) {
throw std::invalid_argument(std::string("all sequence lengths must be >= 0, was: ") + std::to_string(*length));
}
        else if (*length == 0) {
means__bo += O;
continue;
}
else if (*length > T) {
throw std::out_of_range("lengths must sum up to the number of rows");
}
T -= *length;
A scale = 1. / *length;
for (L i = 0; i < *length; ++i) {
vec_add(means__bo, X__to, scale, O);
X__to += O;
}
means__bo += O;
}
}
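// Worked example (illustrative only): one sequence of length 2 over O=2
// columns; means__bo must be zero-initialised since vec_add accumulates:
//
//   float X[4] = {1.f, 2.f,
//                 3.f, 6.f};
//   int lengths[1] = {2};
//   float means[2] = {0.f, 0.f};
//   cpu_reduce_mean(means, X, lengths, 1, 2, 2);
//   // means == {2.f, 4.f}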
template <typename A, typename L>
void cpu_backprop_reduce_mean(A* dX__to, const A* d_means__bo, const L* lengths__b,
                              L B, L T, L O)
{
    static_assert(std::is_floating_point<A>::value,
                  "Array should be floating point");
    static_assert(std::is_integral<L>::value, "Array length should be integral");
for (const L* length = lengths__b; length < lengths__b + B; ++length) {
A scale = 1. / *length;
for (L i = 0; i < *length; ++i) {
vec_add(dX__to, d_means__bo, scale, O);
dX__to += O;
}
d_means__bo += O;
}
}
template <typename A, typename L>
void cpu_mish(A* Y, L N, A threshold)
{
    static_assert(std::is_floating_point<A>::value,
                  "Array should be floating point");
    static_assert(std::is_integral<L>::value, "Array length should be integral");
for (L i = 0; i < N; ++i) {
if (Y[i] < threshold) {
Y[i] *= std::tanh(std::log(1.0 + std::exp(Y[i])));
}
}
}
template <typename A, typename L>
void cpu_backprop_mish(A* dX, const A* X, L N, A threshold)
{
    static_assert(std::is_floating_point<A>::value,
                  "Array should be floating point");
    static_assert(std::is_integral<L>::value, "Array length should be integral");
for (L i = 0; i < N; ++i) {
A x = X[i];
if (x < threshold) {
A exp_x = std::exp(x);
A exp_2x = std::exp(2 * x);
A exp_3x = std::exp(3 * x);
A omega = (4. * (x + 1)) + (4 * exp_2x) + exp_3x + exp_x * (4. * x + 6);
A delta = 2. * exp_x + exp_2x + 2.;
dX[i] = dX[i] * ((exp_x * omega) / (delta * delta));
}
}
}
template <typename A, typename L>
void cpu_reduce_sum(A* sums__bo, const A* X__to, const L* lengths__b,
                    L B, L T, L O)
{
    static_assert(std::is_floating_point<A>::value,
                  "Array should be floating point");
    static_assert(std::is_integral<L>::value, "Array length should be integral");
for (const L* length = lengths__b; length < lengths__b + B; ++length) {
if (*length < 0) {
throw std::invalid_argument(std::string("all sequence lengths must be >= 0, was: ") + std::to_string(*length));
}
        else if (*length == 0) {
sums__bo += O;
continue;
}
else if (*length > T) {
throw std::out_of_range("lengths must sum up to the number of rows");
}
T -= *length;
for (L i = 0; i < *length; ++i) {
            vec_add(sums__bo, X__to, static_cast<A>(1.0), O);
X__to += O;
}
sums__bo += O;
}
}
template <typename A, typename L>
void cpu_backprop_reduce_sum(A* dX__to, const A* d_sums__bo, const L* lengths__b,
                             L B, L T, L O)
{
    static_assert(std::is_floating_point<A>::value,
                  "Array should be floating point");
    static_assert(std::is_integral<L>::value, "Array length should be integral");
for (const L* length = lengths__b; length < lengths__b + B; ++length) {
for (L i = 0; i < *length; ++i) {
            vec_add(dX__to, d_sums__bo, static_cast<A>(1.0), O);
dX__to += O;
}
d_sums__bo += O;
}
}
template <typename A, typename L>
void cpu_relu(A* X, L N)
{
    static_assert(std::is_floating_point<A>::value,
                  "Array should be floating point");
    static_assert(std::is_integral<L>::value, "Array length should be integral");
for (L i = 0; i < N; ++i) {
if (X[i] <= 0.0) {
X[i] = 0.0;
}
}
}
template <typename A, typename L>
void seq2col(A* output, const A* X, const L* lengths, L nW, L B, L I, L nL)
{
// Let's say nW is 1 (it usually is). Then we want to take:
// 1a 1b 1c
// 2a 2b 2c
// 3a 3b 3c
// And make
// __ __ __ 1a 1b 1c 2a 2b 2c
// 1a 1b 1c 2a 2b 2c 3a 3b 3c
// 2a 2b 2c 3a 3b 3c __ __ __
// Where __ is padding.
// Now let's say nW is 2. Then we want to take:
// 1a 1b 1c
// 2a 2b 2c
// 3a 3b 3c
// And make
// __ __ __ __ __ __ 1a 1b 1c 2a 2b 2c 3a 3b 3c
// __ __ __ 1a 1b 1c 2a 2b 2c 3a 3b 3c __ __ __
// 1a 1b 1c 2a 2b 2c 3a 3b 3c __ __ __ __ __ __
// * x_start=-6, x_end=9 : (0-2) * 3, (0+2+1) * 3
// * x_start=-3, x_end=13 : (1-2) * 3, (1+2+1) * 3
// * x_start=0, x_end=16 : (2-2) * 3, (2+2+1) * 3
    // If there is more than one sequence (nL > 1), the sequence
    // lengths dictate the boundaries/padding rather than the
    // begin/end of X.
    static_assert(std::is_floating_point<A>::value,
                  "Array should be floating point");
    static_assert(std::is_integral<L>::value, "Array length should be integral");
L nF = nW * 2 + 1;
L seq_start = 0;
for (L i = 0; i < nL; ++i) {
// Calculate the bounds of the next sequence.
L seq_end = seq_start + lengths[i];
for (L j = seq_start; j < seq_end; ++j) {
// Find the unconstrained window around b, which
// may be out of the sequence bounds.
L window_start = j - nW;
L window_end = j + nW + 1;
// Find the sequence-constrained window around b.
L x_start = std::max(seq_start, window_start);
L x_end = std::min(seq_end, window_end);
L n_elems = x_end - x_start;
L out_offset = x_start - window_start;
std::memcpy(output + (j * nF * I) + (out_offset * I),
X + (x_start * I),
n_elems * I * sizeof(*output));
}
seq_start += lengths[i];
}
}
template <typename A, typename L>
void backprop_seq2col(A* d_seqs, const A* d_cols, const L* lengths, L B, L I, L nW, L nL)
{
// here's what we're doing, if we had 2d indexing.
// for i in range(b):
// d_seq[i] += d_cols[i-2, 4]
// d_seq[i] += d_cols[i-1, 3]
// d_seq[i] += d_cols[i, 2]
// d_seq[i] += d_cols[i+1, 1]
// d_seq[i] += d_cols[i+2, 0]
    static_assert(std::is_floating_point<A>::value,
                  "Array should be floating point");
    static_assert(std::is_integral<L>::value, "Array length should be integral");
L nF = nW * 2 + 1;
L seq_start = 0;
for (L i = 0; i < nL; ++i) {
// Calculate the bounds of the next sequence.
L seq_end = seq_start + lengths[i];
for (L j = seq_start; j < seq_end; ++j) {
// Find the unconstrained window around b, which
// may be out of the sequence bounds.
L window_begin = j - nW;
L window_end = j + nW + 1;
// Find the sequence-constrained window around b.
L d_seqs_begin = std::max(seq_start, window_begin);
L d_seqs_end = std::min(seq_end, window_end);
L n_elems = d_seqs_end - d_seqs_begin;
            // If the window is cut short on the left, shift the
            // write offset into the output by the same amount.
L out_offset = d_seqs_begin - window_begin;
vec_add(d_seqs + d_seqs_begin * I,
d_cols + (j * nF * I) + (out_offset * I),
                    static_cast<A>(1.), n_elems * I);
}
seq_start += lengths[i];
}
}
template <typename F, typename I, typename L>
void cpu_gather_add(typename axpy<F>::ptr axpy, F* out_bo, const F* table_to, const I* indices_bk, L T, L O, L B, L K) {
for (L b = 0; b < B; ++b) {
for (L k = 0; k < K; ++k) {
I idx = indices_bk[b * K + k];
            if (idx >= T) {
throw std::out_of_range("Embedding index out-of-bounds");
}
axpy(O, 1.0, table_to + idx * O, 1, out_bo + b * O, 1);
}
}
}
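// Worked example (illustrative only), reusing the toy_saxpy sketched near
// the top of this file. B=2 batch items each gather-add K=2 rows from a
// T=3 by O=2 table:
//
//   float table[6] = {1.f, 2.f,
//                     3.f, 4.f,
//                     5.f, 6.f};
//   int idx[4] = {0, 1,    // batch item 0 sums rows 0 and 1
//                 2, 2};   // batch item 1 sums row 2 twice
//   float out[4] = {0.f, 0.f, 0.f, 0.f};   // accumulated into, so zero first
//   cpu_gather_add(toy_saxpy, out, table, idx, 3, 2, 2, 2);
//   // out == {4, 6, 10, 12}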
#endif // CPU_KERNELS_HH
thinc-release-v9.1.1/thinc/backends/cupy_ops.py 0000664 0000000 0000000 00000031226 14670643317 0021544 0 ustar 00root root 0000000 0000000 import numpy
from .. import registry
from ..compat import cublas, cupy, cupyx
from ..types import DeviceTypes
from ..util import (
is_cupy_array,
is_mxnet_gpu_array,
is_tensorflow_gpu_array,
is_torch_cuda_array,
mxnet2xp,
tensorflow2xp,
torch2xp,
)
from . import _custom_kernels
from .numpy_ops import NumpyOps
from .ops import Ops
@registry.ops("CupyOps")
class CupyOps(Ops):
name = "cupy"
xp = cupy
_xp2 = cupyx
def __init__(
self, device_type: DeviceTypes = "gpu", device_id: int = 0, **kwargs
) -> None:
self.device_type = device_type
self.device_id = device_id
def to_numpy(self, data, *, byte_order=None):
if not isinstance(data, numpy.ndarray):
data = data.get()
if byte_order:
dtype = data.dtype.newbyteorder(byte_order)
data = numpy.asarray(data, dtype=dtype)
return data
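    # Usage sketch (illustrative only): force little-endian output when
    # serialising across platforms, e.g.
    #
    #     arr = ops.to_numpy(gpu_array, byte_order="<")
    #     assert arr.dtype.byteorder in ("<", "=")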
def gather_add(self, table, indices):
if table.dtype in ("float32", "float64"):
return _custom_kernels.gather_add(table, indices)
else:
return super().gather_add(table, indices)
def dish(self, X, inplace=False):
if X.dtype in ("float32", "float64"):
return _custom_kernels.dish(X, inplace=inplace)
else:
return super().dish(X, inplace=inplace)
def backprop_dish(self, dY, X, inplace=False):
if X.dtype == dY.dtype and X.dtype in ("float32", "float64"):
return _custom_kernels.backprop_dish(dY, X, inplace=inplace)
else:
return super().backprop_dish(dY, X, inplace=inplace)
def gelu(self, X, inplace=False):
if X.dtype in ("float32", "float64"):
return _custom_kernels.gelu(X, inplace=inplace, threshold=6.0)
else:
return super().gelu(X, inplace=inplace)
def backprop_gelu(self, dY, X, inplace=False):
if X.dtype == dY.dtype and X.dtype in ("float32", "float64"):
return _custom_kernels.backprop_gelu(dY, X, inplace=inplace, threshold=6.0)
else:
return super().backprop_gelu(dY, X, inplace=inplace)
def gemm(self, x, y, out=None, trans1=False, trans2=False):
if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):
raise ValueError(
"Encountered a numpy array when processing with cupy. "
"Did you call model.ops.asarray on your data?"
)
if trans1:
x = x.T
if trans2:
y = y.T
if out is None:
return self.xp.dot(x, y)
else:
self.xp.dot(x, y, out=out)
return out
def asarray(self, data, dtype=None):
# We'll try to perform a zero-copy conversion if possible.
if is_cupy_array(data):
array = self.xp.asarray(data, dtype=dtype)
elif is_torch_cuda_array(data):
array = torch2xp(data)
elif is_tensorflow_gpu_array(data):
array = tensorflow2xp(data)
elif is_mxnet_gpu_array(data):
array = mxnet2xp(data)
else:
array = self.xp.array(data, dtype=dtype)
if dtype is not None:
array = array.astype(dtype=dtype, copy=False)
return array
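    # Conversion sketch (illustrative only): a CUDA tensor from PyTorch can
    # usually be wrapped without a device copy, e.g.
    #
    #     import torch
    #     t = torch.zeros(2, 3, device="cuda")
    #     x = CupyOps().asarray(t)   # zero-copy via torch2xp when possible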
def pad(self, seqs, round_to=1):
"""Perform padding on a list of arrays so that they each have the same
length, by taking the maximum dimension across each axis. This only
works on non-empty sequences with the same `ndim` and `dtype`.
"""
# TODO: This should be generalized to handle different ranks
if not seqs:
raise ValueError("Cannot pad empty sequence")
if len(set(seq.ndim for seq in seqs)) != 1:
raise ValueError("Cannot pad sequences with different ndims")
if len(set(seq.dtype for seq in seqs)) != 1:
raise ValueError("Cannot pad sequences with different dtypes")
if len(set(seq.shape[1:] for seq in seqs)) != 1:
raise ValueError("Cannot pad sequences that differ on other dimensions")
# Our CUDA kernel can currently only handle C contiguous arrays.
if not all(seq.flags["C_CONTIGUOUS"] for seq in seqs) or seqs[0].dtype not in (
"float32",
"float64",
"int32",
"int64",
):
return super().pad(seqs, round_to)
return _custom_kernels.pad(seqs, round_to)
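    # Padding sketch (illustrative only): two float32 sequences of lengths
    # 2 and 3, padded to the maximum length rounded up to a multiple of 4:
    #
    #     ops = CupyOps()
    #     seqs = [ops.alloc((2, 8)), ops.alloc((3, 8))]
    #     padded = ops.pad(seqs, round_to=4)   # shape (2, 4, 8)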
def maxout(self, X):
if X.dtype in ("float32", "float64"):
return _custom_kernels.maxout(X)
else:
return super().maxout(X)
def backprop_maxout(self, dY, which, P):
if dY.dtype in ("float32", "float64") and which.dtype == "int32":
return _custom_kernels.backprop_maxout(dY, which, P)
else:
return super().backprop_maxout(dY, which, P)
def relu(self, X, inplace=False):
if not inplace:
return X * (X > 0)
else:
X *= X > 0
return X
def backprop_relu(self, dY, Y, inplace=False):
if not inplace:
return dY * (Y > 0)
dY *= Y > 0
return dY
def clipped_linear(
self,
X,
slope: float = 1.0,
offset: float = 0.0,
min_val: float = 0.0,
max_val: float = 1.0,
inplace: bool = False,
):
if X.dtype in ("float32", "float64"):
return _custom_kernels.clipped_linear(
X,
inplace=inplace,
slope=slope,
offset=offset,
min_val=min_val,
max_val=max_val,
)
else:
return super().clipped_linear(
X,
inplace=inplace,
slope=slope,
offset=offset,
min_val=min_val,
max_val=max_val,
)
def backprop_clipped_linear(
self,
dY,
X,
slope: float = 1.0,
offset: float = 0.0,
min_val: float = 0.0,
max_val: float = 1.0,
inplace: bool = False,
):
if X.dtype == dY.dtype and X.dtype in ("float32", "float64"):
return _custom_kernels.backprop_clipped_linear(
dY,
X,
slope=slope,
offset=offset,
min_val=min_val,
max_val=max_val,
inplace=inplace,
)
else:
return super().backprop_clipped_linear(
dY=dY,
X=X,
slope=slope,
offset=offset,
min_val=min_val,
max_val=max_val,
inplace=inplace,
)
def backprop_hard_swish(self, dY, X, inplace: bool = False):
if X.dtype == dY.dtype and X.dtype in ("float32", "float64"):
return _custom_kernels.backprop_hard_swish(dY, X, inplace=inplace)
else:
return super().backprop_hard_swish(dY, X, inplace=inplace)
def backprop_hard_swish_mobilenet(self, dY, X, inplace: bool = False):
if X.dtype == dY.dtype and X.dtype in ("float32", "float64"):
return _custom_kernels.backprop_hard_swish_mobilenet(dY, X, inplace=inplace)
else:
return super().backprop_hard_swish_mobilenet(dY, X, inplace=inplace)
def mish(self, X, threshold=20.0, inplace=False):
if X.dtype in ("float32", "float64"):
return _custom_kernels.mish(X, inplace=inplace, threshold=threshold)
else:
return super().mish(X, threshold, inplace)
def backprop_mish(self, dY, X, threshold=20.0, inplace=False):
if X.dtype == dY.dtype and X.dtype in ("float32", "float64"):
return _custom_kernels.backprop_mish(
dY, X, inplace=inplace, threshold=threshold
)
else:
return super().backprop_mish(dY, X, threshold, inplace)
def swish(self, X, inplace=False):
if X.dtype in ("float32", "float64"):
return _custom_kernels.swish(X, inplace=inplace, threshold=17.0)
else:
return super().swish(X, inplace=inplace)
def backprop_swish(self, dY, X, Y, inplace=False):
if X.dtype == dY.dtype == Y.dtype and X.dtype in ("float32", "float64"):
return _custom_kernels.backprop_swish(
dY, X, Y, inplace=inplace, threshold=17.0
)
else:
return super().backprop_swish(dY, X, Y, inplace=inplace)
def clip_gradient(self, gradient, threshold):
# We do not use CuPy's linalg.norm, since it uses scalar reductions
# using one CUDA block. This is a lot slower than the cuBLAS
# implementation.
def frobenius_norm(X):
X_vec = X.reshape(-1)
return cublas.nrm2(X_vec)
grad_norm = cupy.maximum(frobenius_norm(gradient), 1e-12)
gradient *= cupy.minimum(threshold, grad_norm) / grad_norm
return gradient
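    # Clipping sketch (illustrative only): a gradient with L2 norm 5 clipped
    # to threshold 1 is scaled by 0.2, e.g.
    #
    #     ops = CupyOps()
    #     g = ops.asarray([3.0, 4.0], dtype="float32")
    #     g = ops.clip_gradient(g, threshold=1.0)   # now [0.6, 0.8]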
def seq2col(self, seq, nW, *, lengths=None):
"""Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence.
The new sequence is constructed by concatenating nW preceding and succeeding
vectors onto each column in the sequence, to extract a window of features.
"""
if seq.dtype in ("float32", "float64") and (
lengths is None or lengths.dtype == "int32"
):
return _custom_kernels.seq2col(seq, nW, lengths=lengths)
else:
return super().seq2col(seq, nW, lengths=lengths)
def backprop_seq2col(self, dY, nW, *, lengths=None):
if dY.dtype in ("float32", "float64") and (
lengths is None or lengths.dtype == "int32"
):
return _custom_kernels.backprop_seq2col(dY, nW, lengths=lengths)
else:
return super().backprop_seq2col(dY, nW, lengths=lengths)
def reduce_mean(self, X, lengths):
if X.dtype in ("float32", "float64") and lengths.dtype == "int32":
return _custom_kernels.reduce_mean(X, lengths=lengths)
else:
            return super().reduce_mean(X, lengths)
def backprop_reduce_mean(self, d_means, lengths):
if d_means.dtype in ("float32", "float64") and lengths.dtype == "int32":
return _custom_kernels.backprop_reduce_mean(d_means, lengths)
else:
            return super().backprop_reduce_mean(d_means, lengths)
def reduce_max(self, X, lengths):
if X.dtype in ("float32", "float64") and lengths.dtype == "int32":
return _custom_kernels.reduce_max(X, lengths)
else:
            return super().reduce_max(X, lengths)
def backprop_reduce_max(self, d_maxes, which, lengths):
if (
d_maxes.dtype in ("float32", "float64")
and which.dtype == "int32"
and lengths.dtype == "int32"
):
return _custom_kernels.backprop_reduce_max(d_maxes, which, lengths)
else:
            return super().backprop_reduce_max(d_maxes, which, lengths)
def reduce_sum(self, X, lengths):
if X.dtype in ("float32", "float64") and lengths.dtype == "int32":
return _custom_kernels.reduce_sum(X, lengths)
else:
return super().reduce_sum(X, lengths)
def backprop_reduce_sum(self, d_sums, lengths):
if d_sums.dtype in ("float32", "float64") and lengths.dtype == "int32":
return _custom_kernels.backprop_reduce_sum(d_sums, lengths)
else:
return super().backprop_reduce_sum(d_sums, lengths)
def hash(self, ids, seed):
return _custom_kernels.hash(ids, seed)
def scatter_add(self, table, indices, values):
self._xp2.scatter_add(table, indices, values)
def adam(
self, weights, gradient, mom1, mom2, beta1, beta2, eps, learn_rate, mod_rate=1.0
):
_check_compatible_shape(weights, gradient)
_check_compatible_shape(weights, mom1)
_check_compatible_shape(weights, mom2)
adam_kernel(
gradient, learn_rate, 1 - beta1, 1 - beta2, eps, weights, mom1, mom2
)
gradient.fill(0)
return weights, gradient, mom1, mom2
def position_encode(self, N, D, period=10000, out=None):
positions = NumpyOps().position_encode(N, D, period=period, out=out)
return self.asarray(positions)
if cupy is not None:
adam_kernel = cupy.ElementwiseKernel(
"T grad, T lr, T one_minus_beta1, T one_minus_beta2, T eps",
"T param, T m, T v",
"""m += one_minus_beta1 * (grad - m);
v += one_minus_beta2 * (grad * grad - v);
param -= lr * m / (sqrt(v) + eps);""",
"adam",
)
else:
adam_kernel = None
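# The kernel fuses Adam's moment updates and parameter step into a single
# elementwise pass; `m += one_minus_beta1 * (grad - m)` is algebraically the
# usual `m = beta1 * m + (1 - beta1) * grad`, and likewise for `v`.
# Equivalent NumPy sketch (illustrative only):
#
#     m += (1 - beta1) * (grad - m)
#     v += (1 - beta2) * (grad * grad - v)
#     param -= lr * m / (numpy.sqrt(v) + eps)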
def _check_compatible_shape(u, v):
if u.shape != v.shape:
msg = f"arrays have incompatible shapes: {u.shape} and {v.shape}"
raise ValueError(msg)
thinc-release-v9.1.1/thinc/backends/mps_ops.py 0000664 0000000 0000000 00000001174 14670643317 0021362 0 ustar 00root root 0000000 0000000 from typing import TYPE_CHECKING
import numpy
from .. import registry
from ..compat import has_apple_ops
from .numpy_ops import NumpyOps
from .ops import Ops
if TYPE_CHECKING:
# Type checking does not work with dynamic base classes, since MyPy cannot
# determine against which base class to check. So, always derive from Ops
# during type checking.
_Ops = Ops
else:
if has_apple_ops:
from .apple_ops import AppleOps
_Ops = AppleOps
else:
_Ops = NumpyOps
@registry.ops("MPSOps")
class MPSOps(_Ops):
"""Ops class for Metal Performance shaders."""
name = "mps"
xp = numpy
thinc-release-v9.1.1/thinc/backends/numpy_ops.pxd 0000664 0000000 0000000 00000003705 14670643317 0022100 0 ustar 00root root 0000000 0000000 from .cblas cimport saxpy_ptr
ctypedef double[:, ::1] double2d_t
ctypedef double[:, :, ::1] double3d_t
ctypedef float[:, ::1] float2d_t
ctypedef float[:, :, ::1] float3d_t
ctypedef int[:, ::1] int2d_t
ctypedef unsigned int[:, ::1] uint2d_t
cdef fused ints2d_ft:
int2d_t
uint2d_t
cdef fused reals2d_ft:
float2d_t
double2d_t
cdef fused reals3d_ft:
float3d_t
double3d_t
cdef extern from "cpu_kernels.hh":
cdef cppclass axpy[T]:
ctypedef void (*ptr)(int N, T alpha, const T* X, int incX, T *Y, int incY);
void cpu_maxout[A, L](A* best__bo, L* which__bo, const A* cands_bop,
L B, L O, L P)
void cpu_backprop_maxout[A, L](A* dX__bop, const A* dX__bo, const L* which__bo,
L B, L O, L P) except +
void cpu_reduce_max[A, L](A* maxes__bo, L* which_bo, const A* X__to,
const L* lengths__b, L B, L T, L O) except +
void cpu_backprop_reduce_max[A, L](A* dX__to, const A* d_maxes__bo, const L* which__bo,
const L* lengths__b, L B, L T, L O) except +
void cpu_reduce_mean[A, L](A* means__bo, const A* X__to, const L* lengths__b,
L B, L T, L O) except +
void cpu_backprop_reduce_mean[A, L](A* dX__to, const A* d_means__bo, const L* lengths__b,
L B, L T, L O)
void cpu_mish[A, L](A* Y, L N, A threshold)
void cpu_backprop_mish[A, L](A* dX, const A* X, L N, A threshold)
void cpu_reduce_sum[A, L](A* sums__bo, const A* X__to, const L* lengths__b,
L B, L T, L O) except +
void cpu_backprop_reduce_sum[A, L](A* dX__to, const A* d_sums__bo, const L* lengths__b,
L B, L T, L O)
void cpu_relu[A, L](A* X, L N)
void backprop_seq2col[A, L](A* d_seqs, const A* d_cols, const L* lengths, L B, L I, L nW, L nL)
void seq2col[A, L](A* output, const A* X, const L* lengths, L nW, L B, L I, L nL)
void cpu_gather_add[F, I, L](axpy[F].ptr axpy, F* out_bo, const F* table_to, const I* indices_bk,
L T, L O, L B, L K) except +
thinc-release-v9.1.1/thinc/backends/numpy_ops.pyx 0000664 0000000 0000000 00000112556 14670643317 0022132 0 ustar 00root root 0000000 0000000 # cython: cdivision=True
# cython: infer_types=True
from collections.abc import Sized
from typing import Optional
import numpy
cimport cython
cimport numpy as np
from cymem.cymem cimport Pool
from libc.math cimport isnan
from libc.stdint cimport uint32_t, uint64_t
from libc.stdlib cimport calloc, free, malloc
from libc.string cimport memcpy, memset
from murmurhash.mrmr cimport hash64
from preshed.maps cimport PreshMap
from .. import registry
from ..types import ArrayXd, DeviceTypes, DTypes, Shape
from ..util import copy_array, get_array_module
from .cblas cimport CBlas, daxpy, dgemm, saxpy, sgemm, sscal
from ..compat import has_blis
from .ops import Ops, _split_weights, _transpose_weights, _untranspose_unsplit_weights
cdef extern from "math.h":
float logf(float x) nogil
float sqrtf(float x) nogil
float expf(float x) nogil
float tanhf(float x) nogil
float sinf(float x) nogil
float cosf(float x) nogil
@registry.ops("NumpyOps")
class NumpyOps(Ops):
name = "numpy"
xp = numpy
def __init__(
self,
device_type: DeviceTypes = "cpu",
device_id: int = -1,
*,
use_blis: bool = True
) -> None:
self.device_type = device_type
self.device_id = device_id
self.use_blis = use_blis
if self.use_blis and not has_blis:
raise ValueError("BLIS support requires blis: pip install blis")
def asarray(self, data, dtype=None):
if isinstance(data, self.xp.ndarray):
array = data
elif hasattr(data, 'numpy'):
# Handles PyTorch Tensor
array = data.numpy()
elif hasattr(data, "get"):
array = data.get()
else:
array = self.xp.array(data, dtype=dtype)
if dtype is not None:
array = array.astype(dtype=dtype, copy=False)
return array
def alloc(self, shape: Shape, *, dtype: Optional[DTypes] = "float32", zeros: bool = True) -> ArrayXd:
if zeros:
return self.xp.zeros(shape, dtype=dtype)
else:
return self.xp.empty(shape, dtype=dtype)
def cblas(self) -> CBlas:
return CBlas()
def gemm(self, np.ndarray x, np.ndarray y, *, np.ndarray out=None, trans1=False, trans2=False):
if x.ndim != 2:
raise ValueError(f"Provided 'x' array should be 2-dimensional, but found {x.ndim} dimension(s).")
if y.ndim != 2:
raise ValueError(f"Provided 'y' array should be 2-dimensional, but found {y.ndim} dimension(s).")
if not self.use_blis: # delegate to base Ops
return super().gemm(x, y, out=out, trans1=trans1, trans2=trans2)
x = self.as_contig(x)
y = self.as_contig(y)
cdef int nM = x.shape[0] if not trans1 else x.shape[1]
cdef int nK = x.shape[1] if not trans1 else x.shape[0]
cdef int nK_b = y.shape[0] if not trans2 else y.shape[1]
cdef int nN = y.shape[1] if not trans2 else y.shape[0]
if nK != nK_b:
msg = "Shape mismatch for blis.gemm: (%d, %d), (%d, %d)"
raise ValueError(msg % (nM, nK, nK_b, nN))
if out is not None:
out = self.as_contig(out)
else:
# Can be uninitialized as 'beta' is zero.
out = numpy.empty((nM, nN), dtype=x.dtype)
cblas = self.cblas()
if x.dtype == "float32" and y.dtype == "float32" and out.dtype == "float32":
            sgemm(cblas)(trans1, trans2,
                nM, nN, nK,
                1.0,
                <const float *>(x.data), x.shape[1],
                <const float *>(y.data), y.shape[1],
                0.0,
                <float *>(out.data), out.shape[1])
elif x.dtype == "float64" and y.dtype == "float64" and out.dtype == "float64":
            dgemm(cblas)(trans1, trans2,
                nM, nN, nK,
                1.0,
                <const double *>(x.data), x.shape[1],
                <const double *>(y.data), y.shape[1],
                0.0,
                <double *>(out.data), out.shape[1])
else:
raise ValueError(f"unsupported or mismatching array data types; got '{x.dtype}', '{y.dtype}', '{out.dtype}'")
return out
def relu(self, np.ndarray X, inplace=False):
cdef np.ndarray Y
if X.dtype == "float32":
Y = _inplace_or_copy(X, inplace)
            cpu_relu(<float*>Y.data, <int>Y.size)
return Y
elif X.dtype == "float64":
Y = _inplace_or_copy(X, inplace)
            cpu_relu(<double*>Y.data, <int>Y.size)
return Y
else:
return super().relu(X, inplace=inplace)
def backprop_relu(self, np.ndarray dY, np.ndarray Y, inplace=False):
_check_compatible_shape(dY, Y)
cdef size_t size = Y.size
cdef float* dX_ptr
        cdef const float* Y_ptr = <const float*>Y.data
cdef np.ndarray dX
if dY.dtype == "float32" and Y.dtype == "float32":
dX = _inplace_or_copy(dY, inplace)
            dX_ptr = <float*>dX.data
for i in range(size):
if Y_ptr[i] <= 0:
dX_ptr[i] = 0.
return dX
else:
return super().backprop_relu(dY, Y, inplace)
def lstm_forward_training(
self,
np.ndarray params,
np.ndarray H0,
np.ndarray C0,
np.ndarray X,
np.ndarray size_at_t
):
assert H0.shape[0] == C0.shape[0]
assert H0.shape[1] == C0.shape[1]
Y, fwd_state = lstm_forward_training(self.cblas(), params, H0, C0, X, size_at_t)
return Y, fwd_state
def lstm_forward_inference(
self,
np.ndarray params,
np.ndarray H0,
np.ndarray C0,
np.ndarray X,
np.ndarray size_at_t
):
Y, _ = lstm_forward_training(self.cblas(), params, H0, C0, X, size_at_t)
return Y
def backprop_lstm(
self, np.ndarray dY, np.ndarray lengths, np.ndarray params, fwd_state
):
dX, d_params = backprop_lstm(self.cblas(), dY, lengths, params, fwd_state)
return dX, d_params
def maxout(self, reals3d_ft X):
cdef int B = X.shape[0]
cdef int O = X.shape[1]
cdef int P = X.shape[2]
cdef np.ndarray best
cdef np.ndarray which = self.alloc(shape=(B, O), dtype='int32', zeros=False)
if reals3d_ft is float3d_t:
best = self.alloc(shape=(B, O), dtype="float32", zeros=False)
if len(X) > 0:
                cpu_maxout(<float*>best.data, <int*>which.data,
                           &X[0, 0, 0], B, O, P)
else:
best = self.alloc(shape=(B, O), dtype="float64", zeros=False)
if len(X) > 0:
                cpu_maxout(<double*>best.data, <int*>which.data,
                           &X[0, 0, 0], B, O, P)
return best, which
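    # Usage sketch (illustrative only): a (1, 2, 3) piece tensor reduces to
    # (1, 2) best values plus the winning piece indices, e.g.
    #
    #     ops = NumpyOps()
    #     X = numpy.asarray([[[0.1, 0.5, 0.2], [0.9, 0.3, 0.4]]], dtype="float32")
    #     best, which = ops.maxout(X)   # best: [[0.5, 0.9]], which: [[1, 0]]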
def backprop_maxout(self, reals2d_ft dY, int[:, ::1] which, int P):
cdef int B = dY.shape[0]
cdef int O = dY.shape[1]
cdef np.ndarray dX
if reals2d_ft == float2d_t:
dX = numpy.zeros((B, O, P), dtype='float32')
            cpu_backprop_maxout(<float*>dX.data, &dY[0, 0], &which[0, 0], B, O, P)
else:
dX = numpy.zeros((B, O, P), dtype='float64')
            cpu_backprop_maxout(<double*>dX.data, &dY[0, 0], &which[0, 0], B, O, P)
return dX
def mish(self, np.ndarray X, threshold=20.0, inplace: bool = False):
cdef np.ndarray Y
if X.dtype == "float32":
Y = _inplace_or_copy(X, inplace)
            cpu_mish(<float*>Y.data, <int>Y.size, threshold)
return Y
elif X.dtype == "float64":
Y = _inplace_or_copy(X, inplace)
            cpu_mish(<double*>Y.data, <int>Y.size, threshold)
return Y
else:
return super().mish(X, inplace=inplace)
def backprop_mish(self, np.ndarray dY, np.ndarray X, threshold=20.0, inplace=False):
_check_compatible_shape(dY, X)
cdef np.ndarray dX
if dY.dtype == "float32" and X.dtype == "float32":
dX = _inplace_or_copy(dY, inplace)
            cpu_backprop_mish(<float*>dX.data, <const float*>X.data, <int>X.size, threshold)
return dX
elif dY.dtype == "float64" and X.dtype == "float64":
dX = _inplace_or_copy(dY, inplace)
            cpu_backprop_mish(<double*>dX.data, <const double*>X.data, <int>X.size, threshold)
return dX
else:
return super().backprop_mish(dY, X, threshold, inplace)
def seq2col(self, np.ndarray seq, int nW, *, int[::1] lengths=None):
"""Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1))
sequence. The new sequence is constructed by concatenating nW preceding
and succeeding vectors onto each column in the sequence, to extract a
window of features.
"""
# Note: the type of seq should be changed to reals2d_ft once
# cython/cython#4697 is fixed. The following checks can then be
# removed, because they are guaranteed by the reals2d_ft
# type.
if seq.ndim != 2:
msg = f"seq2col requires sequence array of dimensionality 2, was {seq.ndim}"
raise ValueError(msg)
if not seq.flags.c_contiguous:
msg = f"seq2col requires sequence array that is in C order and contiguous"
raise ValueError(msg)
cdef int B = seq.shape[0]
cdef int I = seq.shape[1]
lengths = check_seq2col_lengths(self, lengths, B)
cdef int nL = lengths.shape[0]
cdef np.ndarray cols
if seq.dtype == "float32":
cols = self.alloc((B, (2*nW + 1) * I), dtype="float32")
if seq.size != 0 and lengths.size != 0:
                seq2col(<float*>cols.data, <const float*>seq.data, &lengths[0], nW, B, I, nL)
return cols
elif seq.dtype == "float64":
cols = self.alloc((B, (2*nW + 1) * I), dtype="float64")
if seq.size != 0 and lengths.size != 0:
                seq2col(<double*>cols.data, <const double*>seq.data, &lengths[0], nW, B, I, nL)
return cols
else:
return super().seq2col(seq, nW, lengths=lengths)
def backprop_seq2col(self, np.ndarray dY, int nW, *, int[::1] lengths=None):
# Note: the type of dY should be changed to reals2d_ft once
# cython/cython#4697 is fixed. The following checks can then be
# removed, because they are guaranteed by the reals2d_ft
# type.
if dY.ndim != 2:
msg = f"backprop_seq2col requires gradient array of dimensionality 2, was {dY.ndim}"
raise ValueError(msg)
if not dY.flags.c_contiguous:
msg = f"backprop_seq2col requires gradient array that is in C order and contiguous"
raise ValueError(msg)
cdef int B = dY.shape[0]
cdef int nF = nW*2+1
cdef int I = dY.shape[1] / nF
lengths = check_seq2col_lengths(self, lengths, B)
cdef int nL = lengths.shape[0]
cdef np.ndarray dX
if dY.dtype == "float32":
dX = self.alloc((B, I), dtype='float32')
if dY.size != 0 and lengths.size != 0:
                backprop_seq2col(<float*>dX.data, <const float*>dY.data, &lengths[0], B, I, nW, nL)
return dX
elif dY.dtype == "float64":
dX = self.alloc((B, I), dtype='float64')
if dY.size != 0 and lengths.size != 0:
                backprop_seq2col(<double*>dX.data, <const double*>dY.data, &lengths[0], B, I, nW, nL)
return dX
else:
return super().backprop_seq2col(dY, nW, lengths=lengths)
@cython.boundscheck(False)
@cython.wraparound(False)
def hash(self, const uint64_t[::1] ids, uint32_t seed):
"""Hash a sequence of 64-bit keys into a table with 4 32-bit keys."""
# Written to mirror the GPU implementation
cdef np.ndarray[uint32_t, ndim=2] keys = self.alloc((ids.shape[0], 4), dtype='uint32')
cdef int i
        cdef uint32_t* dest = <uint32_t*>keys.data
for i in range(len(ids)):
MurmurHash3_x86_128_uint64(ids[i], seed, &dest[i*4])
return keys
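    # Usage sketch (illustrative only): each uint64 key becomes one row of
    # four uint32 values derived from its 128-bit MurmurHash3, e.g.
    #
    #     ops = NumpyOps()
    #     ids = numpy.asarray([42], dtype="uint64")
    #     keys = ops.hash(ids, seed=0)   # shape (1, 4), dtype uint32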
def reduce_mean(self, reals2d_ft X, int[::1] lengths):
cdef int B = lengths.shape[0]
cdef int O = X.shape[1]
cdef int T = X.shape[0]
if B == 0 or O == 0:
if reals2d_ft is float2d_t:
return numpy.zeros(shape=(B, O), dtype="float32")
else:
return numpy.zeros(shape=(B, O), dtype="float64")
cdef np.ndarray means
if reals2d_ft is float2d_t:
means = numpy.zeros(shape=(B, O), dtype="float32")
            cpu_reduce_mean(<float*>means.data, &X[0, 0], &lengths[0], B, T, O)
else:
means = numpy.zeros(shape=(B, O), dtype="float64")
            cpu_reduce_mean(<double*>means.data, &X[0, 0], &lengths[0], B, T, O)
return means
def backprop_reduce_mean(self, reals2d_ft d_means, int[::1] lengths):
cdef int B = lengths.shape[0]
cdef int O = d_means.shape[1]
cdef int T = 0
for length in lengths[:B]:
if length < 0:
raise ValueError(f"all sequence lengths must be >= 0, got {length}")
T += length
if T == 0 or O == 0:
if reals2d_ft is float2d_t:
return numpy.zeros(shape=(T, O), dtype="float32")
else:
return numpy.zeros(shape=(T, O), dtype="float64")
cdef np.ndarray dX
if reals2d_ft is float2d_t:
dX = numpy.zeros((T, O), dtype="float32")
            cpu_backprop_reduce_mean(<float*>dX.data, &d_means[0,0], &lengths[0], B, T, O)
else:
dX = numpy.zeros((T, O), dtype="float64")
            cpu_backprop_reduce_mean(<double*>dX.data, &d_means[0,0], &lengths[0], B, T, O)
return dX
def reduce_sum(self, reals2d_ft X, int[::1] lengths):
cdef int B = lengths.shape[0]
cdef int O = X.shape[1]
cdef int T = X.shape[0]
if B == 0 or O == 0:
if reals2d_ft is float2d_t:
return numpy.zeros(shape=(B, O), dtype="float32")
else:
return numpy.zeros(shape=(B, O), dtype="float64")
cdef np.ndarray sums
if reals2d_ft is float2d_t:
sums = numpy.zeros(shape=(B, O), dtype="float32")
            cpu_reduce_sum(<float*>sums.data, &X[0, 0], &lengths[0], B, T, O)
else:
sums = numpy.zeros(shape=(B, O), dtype="float64")
            cpu_reduce_sum(<double*>sums.data, &X[0, 0], &lengths[0], B, T, O)
return sums
def backprop_reduce_sum(self, reals2d_ft d_sums, int[::1] lengths):
cdef int B = lengths.shape[0]
cdef int O = d_sums.shape[1]
cdef int T = 0
for length in lengths[:B]:
if length < 0:
raise ValueError(f"all sequence lengths must be >= 0, got {length}")
T += length
if T == 0 or O == 0:
if reals2d_ft is float2d_t:
return numpy.zeros(shape=(T, O), dtype="float32")
else:
return numpy.zeros(shape=(T, O), dtype="float64")
cdef np.ndarray dX
if reals2d_ft is float2d_t:
dX = numpy.zeros((T, O), dtype="float32")
            cpu_backprop_reduce_sum(<float*>dX.data, &d_sums[0,0], &lengths[0], B, T, O)
else:
dX = numpy.zeros((T, O), dtype="float64")
            cpu_backprop_reduce_sum(<double*>dX.data, &d_sums[0,0], &lengths[0], B, T, O)
return dX
def reduce_max(self, reals2d_ft X, int[::1] lengths):
cdef int B = lengths.shape[0]
cdef int O = X.shape[1]
cdef int T = X.shape[0]
# Needs to be zero-initialized as we start by assuming that the first element is the max value.
cdef np.ndarray which = self.alloc(shape=(B, O), dtype="i", zeros=True)
if B == 0 or O == 0:
if reals2d_ft is float2d_t:
return numpy.zeros(shape=(B, O), dtype="float32"), which
else:
return numpy.zeros(shape=(B, O), dtype="float64"), which
cdef np.ndarray maxes
if reals2d_ft is float2d_t:
maxes = self.alloc(shape=(B, O), dtype="float32", zeros=False)
            cpu_reduce_max(<float*>maxes.data, <int*>which.data, &X[0, 0], &lengths[0], B, T, O)
else:
maxes = self.alloc(shape=(B, O), dtype="float64", zeros=False)
            cpu_reduce_max(<double*>maxes.data, <int*>which.data, &X[0, 0], &lengths[0], B, T, O)
return maxes, which
def backprop_reduce_max(self, reals2d_ft d_maxes, int[:, ::1] which, int[::1] lengths):
cdef int B = lengths.shape[0]
cdef int O = d_maxes.shape[1]
cdef int T = 0
for length in lengths[:B]:
if length <= 0:
raise ValueError(f"all sequence lengths must be > 0, got {length}")
T += length
if T == 0 or O == 0:
if reals2d_ft is float2d_t:
return numpy.zeros(shape=(T, O), dtype="float32")
else:
return numpy.zeros(shape=(T, O), dtype="float64")
cdef np.ndarray dX
if reals2d_ft is float2d_t:
dX = numpy.zeros((T, O), dtype="float32")
            cpu_backprop_reduce_max(<float*>dX.data, &d_maxes[0,0], &which[0, 0],
                                    &lengths[0], B, T, O)
else:
dX = numpy.zeros((T, O), dtype="float64")
cpu_backprop_reduce_max(dX.data, &d_maxes[0,0], &which[0, 0],
&lengths[0], B, T, O)
return dX
def gather_add(self, reals2d_ft table, ints2d_ft indices):
cdef CBlas cblas = self.cblas()
rows = indices.shape[0]
dims = table.shape[1]
cdef np.ndarray output
if reals2d_ft is float2d_t:
output = self.xp.zeros((rows, dims), dtype="float32")
cpu_gather_add(saxpy(cblas),