diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000..3071611e29
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+Lib/* linguist-vendored
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
new file mode 100644
index 0000000000..4659a9d465
--- /dev/null
+++ b/.github/workflows/ci.yaml
@@ -0,0 +1,159 @@
+on:
+  push:
+    branches: [master, release]
+  pull_request:
+
+name: CI
+
+jobs:
+  rust_tests:
+    name: Run rust tests
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [macos-latest, ubuntu-latest, windows-latest]
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@master
+      - name: Convert symlinks to hardlink (windows only)
+        run: powershell.exe scripts/symlinks-to-hardlinks.ps1
+        if: runner.os == 'Windows'
+      - name: Cache cargo dependencies
+        uses: actions/cache@v1
+        with:
+          key: ${{ runner.os }}-rust_tests-${{ hashFiles('Cargo.lock') }}
+          path: target
+          restore-keys: |
+            ${{ runner.os }}-rust_tests-
+      - name: run rust tests
+        uses: actions-rs/cargo@v1
+        with:
+          command: test
+          args: --verbose --all
+
+  snippets:
+    name: Run snippets tests
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [macos-latest, ubuntu-latest, windows-latest]
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@master
+      - name: Convert symlinks to hardlink (windows only)
+        run: powershell.exe scripts/symlinks-to-hardlinks.ps1
+        if: runner.os == 'Windows'
+      - name: Cache cargo dependencies
+        uses: actions/cache@v1
+        with:
+          key: ${{ runner.os }}-snippets-${{ hashFiles('Cargo.lock') }}
+          path: target
+          restore-keys: |
+            ${{ runner.os }}-snippets-
+      - name: build rustpython
+        uses: actions-rs/cargo@v1
+        with:
+          command: build
+          args: --release --verbose --all
+      - uses: actions/setup-python@v1
+        with:
+          python-version: 3.6
+      - name: Install pipenv
+        run: |
+          python -V
+          python -m pip install --upgrade pip
+          python -m pip install pipenv
+      - run: pipenv install
+        working-directory: ./tests
+      - name: run snippets
+        run: pipenv run pytest -v
+        working-directory: ./tests
+
+  format:
+    name: Check Rust code with rustfmt and clippy
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@master
+      - uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: stable
+          components: rustfmt
+          override: true
+      - name: run rustfmt
+        uses: actions-rs/cargo@v1
+        with:
+          command: fmt
+          args: --all -- --check
+      - name: run clippy
+        uses: actions-rs/cargo@v1
+        with:
+          command: clippy
+          args: --all -- -Dwarnings
+
+  lint:
+    name: Lint Python code with flake8
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@master
+      - uses: actions/setup-python@v1
+        with:
+          python-version: 3.6
+      - name: install flake8
+        run: python -m pip install flake8
+      - name: run lint
+        run: flake8 . --count --exclude=./.*,./Lib,./vm/Lib --select=E9,F63,F7,F82 --show-source --statistics
+
+  cpython:
+    name: Run CPython test suite
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@master
+      - name: build rustpython
+        uses: actions-rs/cargo@v1
+        with:
+          command: build
+          args: --verbose --all
+      - name: run tests
+        run: |
+          export RUSTPYTHONPATH=`pwd`/Lib
+          cargo run -- -m test -v
+
+  wasm:
+    name: Check the WASM package and demo
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@master
+      - name: install wasm-pack
+        run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
+      - name: install geckodriver
+        run: |
+          wget https://github.com/mozilla/geckodriver/releases/download/v0.24.0/geckodriver-v0.24.0-linux32.tar.gz
+          mkdir geckodriver
+          tar -xzf geckodriver-v0.24.0-linux32.tar.gz -C geckodriver
+      - uses: actions/setup-python@v1
+        with:
+          python-version: 3.6
+      - name: Install pipenv
+        run: |
+          python -V
+          python -m pip install --upgrade pip
+          python -m pip install pipenv
+      - run: pipenv install
+        working-directory: ./wasm/tests
+      - uses: actions/setup-node@v1
+      - name: run test
+        run: |
+          export PATH=$PATH:`pwd`/../../geckodriver
+          npm install
+          npm run test
+        working-directory: ./wasm/demo
+      - name: Deploy demo to Github Pages
+        if: success() && github.ref == 'refs/heads/release'
+        uses: peaceiris/actions-gh-pages@v2
+        env:
+          ACTIONS_DEPLOY_KEY: ${{ secrets.ACTIONS_DEMO_DEPLOY_KEY }}
+          PUBLISH_DIR: ./wasm/demo/dist
+          EXTERNAL_REPOSITORY: RustPython/demo
+          PUBLISH_BRANCH: master
diff --git a/.gitignore b/.gitignore
index 8347c5ec51..05270f95d6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,3 +14,5 @@ tests/snippets/resources
 flame-graph.html
 flame.txt
 flamescope.json
+/wapm.lock
+/wapm_packages
diff --git a/.travis.yml b/.travis.yml
index 228aba298e..bdab2ad7c2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,15 +1,26 @@
-before_cache: |
-  if command -v cargo; then
-    ! command -v cargo-sweep && cargo install cargo-sweep
-    cargo sweep -i
-    cargo sweep -t 15
-  fi
+branches:
+  only:
+    - master
+    - release
+    - redox-release
 
-matrix:
+before_cache:
+  - |
+    if command -v cargo; then
+      if ! command -v cargo-sweep; then
+        cargo install cargo-sweep
+      fi
+      cargo sweep -i
+      cargo sweep -t 15
+    fi
+  - rm -rf ~/.cargo/registry/src
+
+jobs:
   fast_finish: true
   include:
-    - name: Run Rust tests
+    - name: Run Rust tests(linux)
       language: rust
+      os: linux
       rust: stable
       cache: cargo
       script:
@@ -20,11 +31,24 @@ matrix:
         # See: https://docs.travis-ci.com/user/caching/#caches-and-build-matrices
         - JOBCACHE=1
 
+    - name: Run Rust tests(osx)
+      language: rust
+      os: osx
+      rust: stable
+      cache: cargo
+      script:
+        - cargo build --verbose --all
+        - cargo test --verbose --all
+      env:
+        # Prevention of cache corruption.
+        # See: https://docs.travis-ci.com/user/caching/#caches-and-build-matrices
+        - JOBCACHE=11
+
     # To test the snippets, we use Travis' Python environment (because
     # installing rust ourselves is a lot easier than installing Python)
     - name: Python test snippets
       language: python
-      python: 3.6
+      python: 3.8
       cache:
         - pip
         - cargo
@@ -34,31 +58,22 @@ matrix:
         - CODE_COVERAGE=false
       script: tests/.travis-runner.sh
 
-    - name: Check Rust code style with rustfmt
+    - name: Check Rust code with rustfmt and clippy
      language: rust
       rust: stable
       cache: cargo
       before_script:
         - rustup component add rustfmt
-      script:
-        - cargo fmt --all -- --check
-      env:
-        - JOBCACHE=3
-
-    - name: Lint Rust code with clippy
-      language: rust
-      rust: stable
-      cache: cargo
-      before_script:
         - rustup component add clippy
       script:
+        - cargo fmt --all -- --check
         - cargo clippy --all -- -Dwarnings
       env:
-        - JOBCACHE=8
+        - JOBCACHE=3
 
     - name: Lint Python code with flake8
       language: python
-      python: 3.6
+      python: 3.8
       cache: pip
       env: JOBCACHE=9
       install: pip install flake8
@@ -87,36 +102,9 @@ matrix:
           on:
             branch: release
 
-    - name: WASM online demo
-      language: rust
-      rust: stable
-      cache: cargo
-      install:
-        - nvm install node
-        # install wasm-pack
-        - curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
-      script:
-        - cd wasm/demo
-        - npm install
-        - npm run dist
-      if: branch = release
-      env:
-        - JOBCACHE=5
-      deploy:
-        - provider: pages
-          repo: RustPython/demo
-          target-branch: master
-          local-dir: wasm/demo/dist
-          skip-cleanup: true
-          # Set in the settings page of your repository, as a secure variable
-          github-token: $WEBSITE_GITHUB_TOKEN
-          keep-history: true
-          on:
-            branch: release
-
     - name: Code Coverage
       language: python
-      python: 3.6
+      python: 3.8
       cache:
         - pip
         - cargo
@@ -131,7 +119,7 @@ matrix:
 
     - name: Test WASM
       language: python
-      python: 3.6
+      python: 3.8
       cache:
         - pip
         - cargo
@@ -145,3 +133,58 @@ matrix:
       env:
         - JOBCACHE=7
         - TRAVIS_RUST_VERSION=stable
+
+    - name: Ensure compilation on Redox OS with Redoxer
+      # language: minimal so that it actually uses bionic rather than xenial;
+      # rust isn't yet available on bionic
+      language: minimal
+      dist: bionic
+      if: type = cron
+      cache:
+        cargo: true
+        directories:
+          - $HOME/.redoxer
+          - $HOME/.cargo
+      before_install:
+        # install rust as travis does for language: rust
+        - curl -sSf https://build.travis-ci.org/files/rustup-init.sh | sh -s --
+          --default-toolchain=$TRAVIS_RUST_VERSION -y
+        - export PATH=${TRAVIS_HOME}/.cargo/bin:$PATH
+        - rustc --version
+        - rustup --version
+        - cargo --version
+
+        - sudo apt-get update -qq
+        - sudo apt-get install libfuse-dev
+      install:
+        - if ! command -v redoxer; then cargo install redoxfs redoxer; fi
+        - redoxer install
+      script:
+        - bash redox/uncomment-cargo.sh
+        - redoxer build --verbose
+        - bash redox/comment-cargo.sh
+      before_cache:
+        - |
+          if ! command -v cargo-sweep; then
+            rustup install stable
+            cargo +stable install cargo-sweep
+          fi
+        - cargo sweep -t 15
+        - rm -rf ~/.cargo/registry/src
+      env:
+        - JOBCACHE=10
+        - TRAVIS_RUST_VERSION=nightly
+
+    - name: Run CPython test suite
+      language: rust
+      os: linux
+      rust: stable
+      cache: cargo
+      script:
+        - cargo build --verbose --all
+        - export RUSTPYTHONPATH=`pwd`/Lib
+        - cargo run -- -m test -v
+      env:
+        # Prevention of cache corruption.
+        # See: https://docs.travis-ci.com/user/caching/#caches-and-build-matrices
+        - JOBCACHE=12
# It is not intended for manual editing. [[package]] -name = "aho-corasick" -version = "0.6.10" +name = "Inflector" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" + +[[package]] +name = "adler32" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2" [[package]] name = "aho-corasick" -version = "0.7.4" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f56c476256dc249def911d6f7580b5fc7e875895b5d7ee88f5d602208035744" dependencies = [ - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr", ] [[package]] name = "ansi_term" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +dependencies = [ + "winapi", +] + +[[package]] +name = "anyhow" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7825f6833612eb2414095684fcf6c635becf3ce97fe48cf6421321e93bfbd53c" + +[[package]] +name = "arr_macro" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a105bfda48707cf19220129e78fca01e9639433ffaef4163546ed8fb04120a5" dependencies = [ - "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "arr_macro_impl", + "proc-macro-hack", ] [[package]] -name = "argon2rs" -version = "0.2.5" +name = "arr_macro_impl" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0609c78bd572f4edc74310dfb63a01f5609d53fa8b4dd7c4d98aef3b3e8d72d1" dependencies = [ - "blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", - "scoped_threadpool 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro-hack", + "quote 1.0.2", + "syn 1.0.14", ] +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + [[package]] name = "arrayvec" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" dependencies = [ - "nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "nodrop", ] +[[package]] +name = "arrayvec" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" + [[package]] name = "ascii-canvas" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff8eb72df928aafb99fe5d37b383f2fe25bd2a765e3e5f7c365916b6f2463a29" dependencies = [ - "term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "term", ] [[package]] name = "atty" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi", + 
"libc", + "winapi", ] [[package]] name = "autocfg" -version = "0.1.5" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" + +[[package]] +name = "autocfg" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" [[package]] name = "backtrace" -version = "0.3.33" +version = "0.3.42" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4b1549d804b6c73f4817df2ba073709e96e426f12987127c48e6745568c350b" dependencies = [ - "backtrace-sys 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-demangle 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace-sys", + "cfg-if", + "libc", + "rustc-demangle", ] [[package]] name = "backtrace-sys" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6575f128516de27e3ce99689419835fce9643a9b215a14d2b5b685be018491" dependencies = [ - "cc 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "cc", + "libc", ] +[[package]] +name = "base64" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" + [[package]] name = "bincode" -version = "1.1.4" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5753e2a71534719bf3f4e57006c3a4f0d2c672a4b676eec84161f763eca87dbf" dependencies = [ - "autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.2", + "serde", ] [[package]] name = "bit-set" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e84c238982c4b1e1ee668d136c510c67a13465279c0cb367ea6baf6310620a80" dependencies = [ - "bit-vec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bit-vec", ] [[package]] name = "bit-vec" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f59bbe95d4e52a6398ec21238d31577f2b28a9d86807f06ca59d191d8440d0bb" [[package]] name = "bitflags" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "blake2" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94cb07b0da6a73955f8fb85d24c466778e70cda767a568229b104f0264089330" dependencies = [ - "byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "crypto-mac 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "opaque-debug 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byte-tools", + "crypto-mac", + "digest", + "opaque-debug", ] [[package]] -name = "blake2-rfc" -version = "0.2.18" +name = "blake2b_simd" +version = "0.5.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" dependencies = [ - "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", - "constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayref", + "arrayvec 0.5.1", + "constant_time_eq", ] [[package]] name = "block-buffer" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", + "block-padding", + "byte-tools", + "byteorder 1.3.2", + "generic-array", ] [[package]] name = "block-padding" -version = "0.1.4" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +dependencies = [ + "byte-tools", +] + +[[package]] +name = "bstr" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe8a65814ca90dfc9705af76bb6ba3c6e2534489a72270e797e603783bb4990b" dependencies = [ - "byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0", + "memchr", + "regex-automata", + "serde", ] [[package]] name = "build_const" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39092a32794787acd8525ee150305ff051b0aa6cc2abaf193924f5ab05425f39" [[package]] name = "bumpalo" -version = "2.5.0" +version = "3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fb8038c1ddc0a5f73787b130f4cc75151e96ed33e417fde765eb5a81e3532f4" [[package]] name = "byte-tools" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" + +[[package]] +name = "byteorder" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855" [[package]] name = "byteorder" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5" + +[[package]] +name = "c2-chacha" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb" +dependencies = [ + "ppv-lite86", +] [[package]] name = "caseless" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "808dab3318747be122cb31d36de18d4d1c81277a76f8332a02b81a3d73463d7f" dependencies = [ - "regex 1.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "regex", + "unicode-normalization", ] [[package]] name = "cc" -version = "1.0.37" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" [[package]] name = "cfg-if" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "b486ce3ccf7ffd79fdeb678eac06a9e6c09fc88d33836340becb8fffe87c5e33" [[package]] name = "chrono" -version = "0.4.6" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8493056968583b0193c1bb04d6f7684586f3726992d6c573261941a895dbd68" dependencies = [ - "num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys", + "libc", + "num-integer", + "num-traits", + "time", + "wasm-bindgen", ] [[package]] name = "clap" version = "2.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" dependencies = [ - "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", - "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ansi_term", + "atty", + "bitflags", + "strsim 0.8.0", + "textwrap", + "unicode-width", + "vec_map", ] [[package]] name = "cloudabi" version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" dependencies = [ - "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags", ] [[package]] name = "constant_time_eq" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "cpython" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b489034e723e7f5109fecd19b719e664f89ef925be785885252469e9822fa940" dependencies = [ - "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "python3-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "num-traits", + "python3-sys", ] [[package]] name = "crc" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d663548de7f5cca343f1e0a48d14dcfb0e9eb4e079ec58883b7251539fa10aeb" +dependencies = [ + "build_const", +] + +[[package]] +name = "crc32fast" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crossbeam-utils" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4" dependencies = [ - "build_const 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 0.1.7", + "cfg-if", + "lazy_static 1.4.0", ] [[package]] name = "crypto-mac" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" +dependencies = [ + "generic-array", + "subtle", +] + +[[package]] +name = 
"csv" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00affe7f6ab566df61b4be3ce8cf16bc2576bca0963ceb0955e45d514bf9a279" +dependencies = [ + "bstr", + "csv-core", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b5cadb6b25c77aeff80ba701712494213f4a8418fcda2ee11b6560c3ad0bf4c" dependencies = [ - "generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", - "subtle 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr", ] [[package]] name = "diff" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499" [[package]] name = "digest" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" dependencies = [ - "generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", + "generic-array", ] [[package]] name = "dirs" version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fd78930633bd1c6e35c4b42b1df7b0cbc6bc191146e512bb3bedf243fcc3901" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + +[[package]] +name = "dirs" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13aea89a5c93364a98e9b37b2fa237effbb694d5cfe01c5b70941f7eb087d5e3" +dependencies = [ + "cfg-if", + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afa0b23de8fd801745c471deffa6e12d248f962c9fd4b4c33787b055599bde7b" dependencies = [ - "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_users 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "libc", + "redox_users", + "winapi", +] + +[[package]] +name = "dns-lookup" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13988670860b076248c74e1b54444efc4f1dec70c8bb25da4b7c0024396b72bf" +dependencies = [ + "libc", + "socket2", + "winapi", ] [[package]] name = "docopt" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f525a586d310c87df72ebcd98009e57f1cc030c8c268305287a476beb653969" dependencies = [ - "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", - "strsim 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0", + "regex", + "serde", + "strsim 0.9.3", ] [[package]] name = "either" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" [[package]] name = "ena" -version = "0.11.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8944dc8fa28ce4a38f778bd46bf7d923fe73eed5a439398507246c8e017e6f36" dependencies = [ - "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "log", ] [[package]] name = "env_logger" -version = "0.5.13" +version = "0.7.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" dependencies = [ - "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", - "humantime 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "termcolor 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "atty", + "humantime", + "log", + "regex", + "termcolor", ] +[[package]] +name = "exitcode" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de853764b47027c2e862a995c34978ffa63c1501f2e15f987ba11bd4f9bba193" + [[package]] name = "failure" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8273f13c977665c5db7eb2b99ae520952fe5ac831ae4cd09d80c4c7042b5ed9" dependencies = [ - "backtrace 0.3.33 (registry+https://github.com/rust-lang/crates.io-index)", - "failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace", + "failure_derive", ] [[package]] name = "failure_derive" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bc225b78e0391e4b8683440bf2e63c2deeeb2ce5189eab46e2b68c6d3725d08" dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.39 (registry+https://github.com/rust-lang/crates.io-index)", - "synstructure 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.8", + "quote 1.0.2", + "syn 1.0.14", + "synstructure", ] [[package]] name = "fake-simd" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" [[package]] name = "fixedbitset" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86d4de0081402f5e88cdac65c8dcdcc73118c1a7a465e2a05f0da05843a8ea33" [[package]] name = "flame" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc2706461e1ee94f55cab2ed2e3d34ae9536cfa830358ef80acff1a3dacab30" dependencies = [ - "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", - "thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 0.2.11", + "serde", + "serde_derive", + "serde_json", + "thread-id", ] [[package]] name = "flamer" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2add1a5e84b1ed7b5d00cdc21789a28e0a8f4e427b677313c773880ba3c4dac" dependencies = [ - "flame 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.39 (registry+https://github.com/rust-lang/crates.io-index)", + "flame", + "quote 0.6.13", + "syn 0.15.44", ] [[package]] name = "flamescope" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6e3e6aee625a28b97be4308bccc2eb5f81d9ec606b3f29e13c0f459ce20dd136" +dependencies = [ + "flame", + "indexmap", + "serde", + "serde_json", +] + +[[package]] +name = "flate2" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bd6d6f4752952feb71363cffc9ebac9411b75b87c6ab6058c40c8900cf43c0f" dependencies = [ - "flame 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "indexmap 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "crc32fast", + "libc", + "libz-sys", + "miniz_oxide", ] [[package]] name = "fnv" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" [[package]] name = "fuchsia-cprng" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" [[package]] name = "futures" -version = "0.1.28" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" [[package]] name = "generic-array" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +dependencies = [ + "typenum", +] + +[[package]] +name = "gethostname" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e692e296bfac1d2533ef168d0b60ff5897b8b70a4009276834014dd8924cc028" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "getrandom" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" dependencies = [ - "typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "libc", + "wasi", ] [[package]] name = "heck" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" dependencies = [ - "unicode-segmentation 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-segmentation", +] + +[[package]] +name = "hermit-abi" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eff2656d88f158ce120947499e971d743c05dbcbed62e5bd2f38f1698bbc3772" +dependencies = [ + "libc", ] [[package]] name = "hex" -version = "0.3.2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "023b39be39e3a2da62a94feb433e91e8bcd37676fbc8bea371daf52b7a769a3e" [[package]] name = "hexf-parse" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79296f72d53a89096cbc9a88c9547ee8dfe793388674620e2207593d370550ac" [[package]] name = "humantime" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" dependencies = [ - "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "quick-error", ] [[package]] name = "indexmap" -version = "1.0.2" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0b54058f0a6ff80b6803da8faf8997cde53872b38f4023728f6830b06cd3c0dc" +dependencies = [ + "autocfg 1.0.0", +] + +[[package]] +name = "is-macro" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04807f3dc9e3ea39af3f8469a5297267faf94859637afb836b33f47d9b2650ee" +dependencies = [ + "Inflector", + "pmutil", + "proc-macro2 1.0.8", + "quote 1.0.2", + "syn 1.0.14", +] [[package]] name = "itertools" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f56a2d0bc861f9165be4eb3442afd3c236d8a98afd426f65d92324ae1091a484" dependencies = [ - "either 1.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "either", ] [[package]] name = "itoa" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" [[package]] name = "js-sys" -version = "0.3.25" +version = "0.3.35" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7889c7c36282151f6bf465be4700359318aef36baa951462382eae49e9577cf9" dependencies = [ - "wasm-bindgen 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen", ] [[package]] name = "keccak" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" [[package]] name = "lalrpop" -version = "0.16.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ascii-canvas 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", - "bit-set 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "docopt 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ena 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lalrpop-util 0.16.3 (registry+https://github.com/rust-lang/crates.io-index)", - "petgraph 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", - "sha2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "string_cache 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +version = "0.17.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64dc3698e75d452867d9bd86f4a723f452ce9d01fe1d55990b79f0c790aa67db" +dependencies = [ + "ascii-canvas", + "atty", + "bit-set", + "diff", + "docopt", + "ena", + "itertools", + "lalrpop-util", + "petgraph", + "regex", + "regex-syntax", + "serde", + "serde_derive", + "sha2", + 
"string_cache", + "term", + "unicode-xid 0.1.0", ] [[package]] name = "lalrpop-util" -version = "0.16.3" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c277d18683b36349ab5cd030158b54856fca6bb2d5dc5263b06288f486958b7c" [[package]] name = "lazy_static" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f033c7ad61445c5b347c7382dd1237847eb1bce590fe50365dcb33d546be73" [[package]] name = "lazy_static" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "lexical" -version = "2.2.1" +version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaad0ee8120fc0cf7df7e8fdbe79bf9d6189351404feb88f4e4a4bb5307bc594" dependencies = [ - "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "lexical-core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "lexical-core", + "rustc_version", ] [[package]] name = "lexical-core" -version = "0.4.2" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f86d66d380c9c5a685aaac7a11818bdfa1f733198dfd9ec09c70b762cd12ad6f" dependencies = [ - "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "ryu 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "stackvector 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "static_assertions 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.4.12", + "bitflags", + "cfg-if", + "rustc_version", + "ryu", + "static_assertions", ] [[package]] name = "libc" -version = "0.2.60" +version = "0.2.66" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" [[package]] -name = "log" -version = "0.3.9" +name = "libz-sys" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eb5e43362e38e2bca2fd5f5134c4d4564a23a5c28e9b95411652021a8675ebe" dependencies = [ - "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "cc", + "libc", + "pkg-config", + "vcpkg", ] [[package]] name = "log" -version = "0.4.7" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "lz4-compress" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f966533a922a9bba9e95e594c1fdb3b9bf5fdcdb11e37e51ad84cd76e468b91" dependencies = [ - "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 0.5.3", + "quick-error", ] [[package]] name = "maplit" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" [[package]] name = "matches" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" [[package]] name = "md-5" version = "0.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a18af3dcaf2b0219366cdb4e2af65a6101457b415c3d1a5c71dd9c2b7c77b9c8" dependencies = [ - "block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "opaque-debug 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "block-buffer", + "digest", + "opaque-debug", ] [[package]] name = "memchr" -version = "2.2.1" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3197e20c7edb283f87c071ddfc7a2cca8f8e0b888c242959846a6fce03c72223" +dependencies = [ + "libc", +] + +[[package]] +name = "miniz_oxide" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f3f74f726ae935c3f514300cc6773a0c9492abc5e972d42ba0c0ebb88757625" +dependencies = [ + "adler32", +] [[package]] name = "new_debug_unreachable" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54" [[package]] name = "nix" -version = "0.13.1" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c722bee1037d430d0f8e687bbdbf222f27cc6e4e68d5caf630857bb2b6dbdce" dependencies = [ - "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", - "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags", + "cc", + "cfg-if", + "libc", + "void", ] [[package]] name = "nix" -version = "0.14.1" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd0eaf8df8bab402257e0a5c17a254e4cc1f72a93588a1ddfb5d356c801aa7cb" dependencies = [ - "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", - "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags", + "cc", + "cfg-if", + "libc", + "void", ] [[package]] name = "nodrop" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" [[package]] name = "nom" version = "4.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6" dependencies = [ - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr", + "version_check", ] [[package]] name = "num-bigint" -version = "0.2.2" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" dependencies = [ - "num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0", + "num-integer", + "num-traits", + 
"serde", ] [[package]] name = "num-complex" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" dependencies = [ - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0", + "num-traits", + "serde", ] [[package]] name = "num-integer" -version = "0.1.39" +version = "0.1.42" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" dependencies = [ - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0", + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfb0800a0291891dd9f4fe7bd9c19384f98f7fbe0cd0f39a2c6b88b9868bbc00" +dependencies = [ + "autocfg 1.0.0", + "num-integer", + "num-traits", ] [[package]] name = "num-rational" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da4dc79f9e6c81bef96148c8f6b8e72ad4541caa4a24373e900a36da07de03a3" dependencies = [ - "num-bigint 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0", + "num-bigint", + "num-integer", + "num-traits", ] [[package]] name = "num-traits" -version = "0.2.6" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" +dependencies = [ + "autocfg 1.0.0", +] + +[[package]] +name = "num_cpus" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "once_cell" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b" [[package]] name = "opaque-debug" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" [[package]] name = "ordermap" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a86ed3f5f244b372d6b1a00b72ef7f8876d0bc6a78a4c9985c53614041512063" + +[[package]] +name = "paste" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "423a519e1c6e828f1e73b720f9d9ed2fa643dce8a7737fb43235ce0b41eeaa49" +dependencies = [ + "paste-impl", + "proc-macro-hack", +] + +[[package]] +name = "paste-impl" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4214c9e912ef61bf42b81ba9a47e8aad1b2ffaf739ab162bf96d1e011f54e6c5" +dependencies = [ + "proc-macro-hack", + "proc-macro2 1.0.8", + "quote 1.0.2", + "syn 1.0.14", +] [[package]] name = "petgraph" version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3659d1ee90221741f65dd128d9998311b0e40c5d3c23a62445938214abce4f" dependencies = [ - "fixedbitset 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - 
"ordermap 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "fixedbitset", + "ordermap", ] [[package]] name = "phf_generator" version = "0.7.24" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09364cc93c159b8b06b1f4dd8a4398984503483891b0c26b867cf431fb132662" dependencies = [ - "phf_shared 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + "phf_shared", + "rand 0.6.5", ] [[package]] name = "phf_shared" version = "0.7.24" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234f71a15de2288bcb7e3b6515828d22af7ec8598ee6d24c3b526fa0a80b67a0" +dependencies = [ + "siphasher", +] + +[[package]] +name = "pkg-config" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" + +[[package]] +name = "pmutil" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3894e5d549cccbe44afecf72922f277f603cd4bb0219c8342631ef18fffbe004" dependencies = [ - "siphasher 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.8", + "quote 1.0.2", + "syn 1.0.14", ] +[[package]] +name = "ppv-lite86" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" + [[package]] name = "precomputed-hash" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] name = "proc-macro-hack" -version = "0.5.8" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecd45702f76d6d3c75a80564378ae228a85f0b59d2f3ed43c91b4a69eb2ebfc5" dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.39 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.8", + "quote 1.0.2", + "syn 1.0.14", ] [[package]] name = "proc-macro2" version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" +dependencies = [ + "unicode-xid 0.1.0", +] + +[[package]] +name = "proc-macro2" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acb317c6ff86a4e579dfa00fc5e6cca91ecbb4e7eb2df0468805b674eb88548" dependencies = [ - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0", ] [[package]] name = "pwd" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dd32d8bece608e144ca20251e714ed107cdecdabb20c2d383cfc687825106a5" dependencies = [ - "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "failure", + "libc", ] [[package]] name = "python3-sys" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61e4aac43f833fd637e429506cb2ac9d7df672c4b68f2eaaa163649b7fdc0444" dependencies = [ - "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "regex", ] [[package]] name = 
"quick-error" -version = "1.2.2" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.30", ] [[package]] -name = "rand" -version = "0.5.6" +name = "quote" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" dependencies = [ - "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.8", ] [[package]] name = "rand" version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" +dependencies = [ + "autocfg 0.1.7", + "libc", + "rand_chacha 0.1.1", + "rand_core 0.4.2", + "rand_hc 0.1.0", + "rand_isaac", + "rand_jitter", + "rand_os", + "rand_pcg", + "rand_xorshift", + "winapi", +] + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "getrandom", + "libc", + "rand_chacha 0.2.1", + "rand_core 0.5.1", + "rand_hc 0.2.0", ] [[package]] name = "rand_chacha" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" dependencies = [ - "autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 0.1.7", + "rand_core 0.3.1", +] + +[[package]] +name = "rand_chacha" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853" +dependencies = [ + "c2-chacha", + "rand_core 0.5.1", ] [[package]] name = "rand_core" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" 
 dependencies = [
- "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2",
 ]

 [[package]]
 name = "rand_core"
-version = "0.4.0"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
+
+[[package]]
+name = "rand_core"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "rand_distr"
+version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2"
+dependencies = [
+ "rand 0.7.3",
+]

 [[package]]
 name = "rand_hc"
 version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4"
+dependencies = [
+ "rand_core 0.3.1",
+]
+
+[[package]]
+name = "rand_hc"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
 dependencies = [
- "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.5.1",
 ]

 [[package]]
 name = "rand_isaac"
 version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08"
 dependencies = [
- "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.3.1",
 ]

 [[package]]
 name = "rand_jitter"
 version = "0.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b"
 dependencies = [
- "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "rand_core 0.4.2",
+ "winapi",
 ]

 [[package]]
 name = "rand_os"
 version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071"
 dependencies = [
- "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cloudabi",
+ "fuchsia-cprng",
+ "libc",
+ "rand_core 0.4.2",
+ "rdrand",
+ "winapi",
 ]

 [[package]]
 name = "rand_pcg"
 version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44"
 dependencies = [
- "autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg 0.1.7",
+ "rand_core 0.4.2",
 ]

 [[package]]
 name = "rand_xorshift"
 version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c"
 dependencies = [
- "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.3.1",
 ]

 [[package]]
 name = "rdrand"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
 dependencies = [
- "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.3.1",
 ]

 [[package]]
 name = "redox_syscall"
 version = "0.1.56"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"

 [[package]]
 name = "redox_users"
-version = "0.3.0"
+version = "0.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431"
 dependencies = [
- "argon2rs 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom",
+ "redox_syscall",
+ "rust-argon2",
 ]

 [[package]]
 name = "regex"
-version = "0.2.11"
+version = "1.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "322cf97724bea3ee221b78fe25ac9c46114ebb51747ad5babd51a2fc6a8235a8"
 dependencies = [
- "aho-corasick 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex-syntax 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "utf8-ranges 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+ "thread_local",
 ]

 [[package]]
-name = "regex"
-version = "1.1.9"
+name = "regex-automata"
+version = "0.1.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "92b73c2a1770c255c240eaa4ee600df1704a38dc3feaa6e949e7fcd4f8dc09f9"
 dependencies = [
- "aho-corasick 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex-syntax 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "utf8-ranges 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.2",
 ]

 [[package]]
 name = "regex-syntax"
-version = "0.5.6"
+version = "0.6.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b28dfe3fe9badec5dbf0a79a9cccad2cfc2ab5484bdb3e44cbd1ae8b3ba2be06"
+
+[[package]]
+name = "result-like"
+version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "656a4c5b3da40e99028cf562dd5b475a0e6c678adf165526ea5943103f4eeb9b"
 dependencies = [
- "ucd-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "is-macro",
 ]

 [[package]]
-name = "regex-syntax"
-version = "0.6.8"
+name = "rust-argon2"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017"
 dependencies = [
- "ucd-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "base64",
+ "blake2b_simd",
+ "constant_time_eq",
+ "crossbeam-utils",
 ]

 [[package]]
 name = "rustc-demangle"
-version = "0.1.15"
+version = "0.1.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783"

 [[package]]
 name = "rustc_version"
 version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
 dependencies = [
- "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "semver",
 ]

 [[package]]
 name = "rustc_version_runtime"
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6de8ecd7fad7731f306f69b6e10ec5a3178c61e464dcc06979427aa4cc891145"
 dependencies = [
- "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc_version",
+ "semver",
 ]

 [[package]]
 name = "rustpython"
-version = "0.1.0"
+version = "0.1.1"
 dependencies = [
- "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cpython 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "flame 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "flamescope 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustpython-compiler 0.1.0",
- "rustpython-parser 0.1.0",
- "rustpython-vm 0.1.0",
- "rustyline 4.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "xdg 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clap",
+ "cpython",
+ "dirs 2.0.2",
+ "env_logger",
+ "flame",
+ "flamescope",
+ "log",
+ "num-traits",
+ "rustpython-compiler",
+ "rustpython-parser",
+ "rustpython-vm",
+ "rustyline",
 ]

 [[package]]
 name = "rustpython-bytecode"
-version = "0.1.0"
+version = "0.1.1"
 dependencies = [
- "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-bigint 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-complex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bincode",
+ "bitflags",
+ "lz4-compress",
+ "num-bigint",
+ "num-complex",
+ "serde",
 ]

 [[package]]
 name = "rustpython-compiler"
-version = "0.1.0"
+version = "0.1.1"
 dependencies = [
- "indexmap 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-complex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustpython-bytecode 0.1.0",
- "rustpython-parser 0.1.0",
+ "arrayvec 0.5.1",
+ "indexmap",
+ "itertools",
+ "log",
+ "num-complex",
+ "rustpython-bytecode",
+ "rustpython-parser",
 ]

 [[package]]
 name = "rustpython-derive"
-version = "0.1.0"
+version = "0.1.1"
 dependencies = [
- "bincode 1.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "maplit 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro-hack 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustpython-bytecode 0.1.0",
- "rustpython-compiler 0.1.0",
- "syn 0.15.39 (registry+https://github.com/rust-lang/crates.io-index)",
+ "maplit",
+ "once_cell",
+ "proc-macro2 1.0.8",
+ "quote 1.0.2",
+ "rustpython-bytecode",
+ "rustpython-compiler",
+ "syn 1.0.14",
 ]

 [[package]]
 name = "rustpython-parser"
-version = "0.1.0"
+version = "0.1.1"
 dependencies = [
- "lalrpop 0.16.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "lalrpop-util 0.16.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-bigint 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-emoji-char 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "wtf8 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lalrpop",
+ "lalrpop-util",
+ "log",
+ "num-bigint",
+ "num-traits",
+ "unic-emoji-char",
+ "unic-ucd-ident",
 ]

 [[package]]
 name = "rustpython-vm"
-version = "0.1.0"
+version = "0.1.1"
 dependencies = [
- "bincode 1.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "blake2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "caseless 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "flame 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "flamer 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "hexf-parse 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "indexmap 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "lexical 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "maplit 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "md-5 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "nix 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-bigint 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-complex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-rational 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro-hack 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "pwd 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc_version_runtime 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustpython-bytecode 0.1.0",
- "rustpython-compiler 0.1.0",
- "rustpython-derive 0.1.0",
- "rustpython-parser 0.1.0",
- "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
- "sha-1 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "sha2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sha3 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "statrs 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-casing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-segmentation 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode_categories 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode_names2 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "adler32",
+ "arr_macro",
+ "base64",
+ "bitflags",
+ "blake2",
+ "byteorder 1.3.2",
+ "caseless",
+ "chrono",
+ "crc",
+ "crc32fast",
+ "csv",
+ "digest",
+ "dns-lookup",
+ "exitcode",
+ "flame",
+ "flamer",
+ "flate2",
+ "gethostname",
+ "getrandom",
+ "hex",
+ "hexf-parse",
+ "indexmap",
+ "is-macro",
+ "itertools",
+ "lexical",
+ "libc",
+ "libz-sys",
+ "log",
+ "maplit",
+ "md-5",
+ "nix 0.16.1",
+ "num-bigint",
+ "num-complex",
+ "num-integer",
+ "num-iter",
+ "num-rational",
+ "num-traits",
+ "num_cpus",
+ "once_cell",
+ "paste",
+ "pwd",
+ "rand 0.7.3",
+ "rand_distr",
+ "regex",
+ "result-like",
+ "rustc_version_runtime",
+ "rustpython-bytecode",
+ "rustpython-compiler",
+ "rustpython-derive",
+ "rustpython-parser",
+ "serde",
+ "serde_json",
+ "sha-1",
+ "sha2",
+ "sha3",
+ "socket2",
+ "statrs",
+ "subprocess",
+ "unic",
+ "unic-common",
+ "unicode-casing",
+ "unicode_names2",
+ "volatile",
+ "wasm-bindgen",
+ "winapi",
 ]

 [[package]]
 name = "rustpython_wasm"
 version = "0.1.0-pre-alpha.2"
 dependencies = [
- "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)",
- "js-sys 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustpython-compiler 0.1.0",
- "rustpython-parser 0.1.0",
- "rustpython-vm 0.1.0",
- "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde-wasm-bindgen 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen-futures 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)",
- "web-sys 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if",
+ "futures",
+ "js-sys",
+ "rustpython-compiler",
+ "rustpython-parser",
+ "rustpython-vm",
+ "serde",
+ "serde-wasm-bindgen",
+ "wasm-bindgen",
+ "wasm-bindgen-futures",
+ "web-sys",
 ]

 [[package]]
 name = "rustyline"
-version = "4.1.0"
+version = "6.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "de64be8eecbe428b6924f1d8430369a01719fbb182c26fa431ddbb0a95f5315d"
 dependencies = [
- "dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "nix 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-segmentation 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "utf8parse 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if",
+ "dirs 2.0.2",
+ "libc",
+ "log",
+ "memchr",
+ "nix 0.14.1",
+ "unicode-segmentation",
+ "unicode-width",
+ "utf8parse",
+ "winapi",
 ]

 [[package]]
 name = "ryu"
-version = "0.2.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "ryu"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "scoped_threadpool"
-version = "0.1.9"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bfa8506c1de11c9c4e4c38863ccbe02a305c8188e85a05a784c9e11e1c3910c8"

 [[package]]
 name = "semver"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
 dependencies = [
- "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "semver-parser",
 ]

 [[package]]
 name = "semver-parser"
 version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"

 [[package]]
 name = "serde"
-version = "1.0.97"
+version = "1.0.104"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449"
 dependencies = [
- "serde_derive 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_derive",
 ]

 [[package]]
 name = "serde-wasm-bindgen"
 version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ee6f12f7ed0e7ad2e55200da37dbabc2cadeb942355c5b629aa3771f5ac5636"
 dependencies = [
- "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "js-sys 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fnv",
+ "js-sys",
+ "serde",
+ "wasm-bindgen",
 ]

 [[package]]
 name = "serde_derive"
-version = "1.0.97"
+version = "1.0.104"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64"
 dependencies = [
- "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 0.15.39 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.8",
+ "quote 1.0.2",
+ "syn 1.0.14",
 ]

 [[package]]
 name = "serde_json"
-version = "1.0.40"
+version = "1.0.45"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eab8f15f15d6c41a154c1b128a22f2dfabe350ef53c40953d84e36155c91192b"
 dependencies = [
- "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itoa",
+ "ryu",
+ "serde",
 ]

 [[package]]
 name = "sha-1"
-version = "0.8.1"
+version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df"
 dependencies = [
- "block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "opaque-debug 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "block-buffer",
+ "digest",
+ "fake-simd",
+ "opaque-debug",
 ]

 [[package]]
 name = "sha2"
-version = "0.8.0"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "27044adfd2e1f077f649f59deb9490d3941d674002f7d062870a60ebe9bd47a0"
 dependencies = [
- "block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "opaque-debug 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "block-buffer",
+ "digest",
+ "fake-simd",
+ "opaque-debug",
 ]

 [[package]]
 name = "sha3"
 version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dd26bc0e7a2e3a7c959bc494caf58b72ee0c71d67704e9520f736ca7e4853ecf"
 dependencies = [
- "block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "keccak 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "opaque-debug 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "block-buffer",
+ "byte-tools",
+ "digest",
+ "keccak",
+ "opaque-debug",
 ]

 [[package]]
 name = "siphasher"
 version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b8de496cf83d4ed58b6be86c3a275b8602f6ffe98d3024a869e124147a9a3ac"

 [[package]]
 name = "smallvec"
-version = "0.6.10"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc"

 [[package]]
-name = "sourcefile"
-version = "0.1.4"
+name = "socket2"
+version = "0.3.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e8b74de517221a2cb01a53349cf54182acdc31a074727d3079068448c0676d85"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "redox_syscall",
+ "winapi",
+]

 [[package]]
-name = "stackvector"
-version = "1.0.6"
+name = "sourcefile"
+version = "0.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
+checksum = "4bf77cb82ba8453b42b6ae1d692e4cdc92f9a47beaf89a847c8be83f4e328ad3"

 [[package]]
 name = "static_assertions"
-version = "0.2.5"
+version = "0.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f3eb36b47e512f8f1c9e3d10c2c1965bc992bd9cdb024fa581e2194501c83d3"

 [[package]]
 name = "statrs"
-version = "0.10.0"
+version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cce16f6de653e88beca7bd13780d08e09d4489dbca1f9210e041bc4852481382"
 dependencies = [
- "rand 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.7.3",
 ]

 [[package]]
 name = "string_cache"
-version = "0.7.3"
+version = "0.7.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "89c058a82f9fd69b1becf8c274f412281038877c553182f1d02eb027045a2d67"
 dependencies = [
- "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "new_debug_unreachable 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "phf_shared 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)",
- "string_cache_codegen 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "string_cache_shared 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0",
+ "new_debug_unreachable",
+ "phf_shared",
+ "precomputed-hash",
+ "serde",
+ "string_cache_codegen",
+ "string_cache_shared",
 ]

 [[package]]
 name = "string_cache_codegen"
-version = "0.4.2"
+version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0f45ed1b65bf9a4bf2f7b7dc59212d1926e9eaf00fa998988e420fd124467c6"
 dependencies = [
- "phf_generator 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "phf_shared 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "string_cache_shared 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "phf_generator",
+ "phf_shared",
+ "proc-macro2 1.0.8",
+ "quote 1.0.2",
+ "string_cache_shared",
 ]

 [[package]]
 name = "string_cache_shared"
 version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1884d1bc09741d466d9b14e6d37ac89d6909cbcac41dd9ae982d4d063bbedfc"

 [[package]]
 name = "strsim"
 version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"

 [[package]]
 name = "strsim"
-version = "0.9.2"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6446ced80d6c486436db5c078dde11a9f73d42b57fb273121e160b84f63d894c"
+
+[[package]]
+name = "subprocess"
+version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7d50729bec6e0706af02ead50d1209a063f6813199cf99262cce281b05a942a"
+dependencies = [
+ "libc",
+ "winapi",
+]

 [[package]]
 name = "subtle"
 version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee"
+
+[[package]]
+name = "syn"
+version = "0.15.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5"
+dependencies = [
+ "proc-macro2 0.4.30",
+ "quote 0.6.13",
+ "unicode-xid 0.1.0",
+]

 [[package]]
 name = "syn"
-version = "0.15.39"
+version = "1.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af6f3550d8dff9ef7dc34d384ac6f107e5d31c8f57d9f28e0081503f547ac8f5"
 dependencies = [
- "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.8",
+ "quote 1.0.2",
+ "unicode-xid 0.2.0",
 ]

 [[package]]
 name = "synstructure"
-version = "0.10.2"
+version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545"
 dependencies = [
- "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 0.15.39 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.8",
+ "quote 1.0.2",
+ "syn 1.0.14",
+ "unicode-xid 0.2.0",
 ]

 [[package]]
 name = "term"
-version = "0.4.6"
+version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "edd106a334b7657c10b7c540a0106114feadeb4dc314513e97df481d5d966f42"
 dependencies = [
- "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.2",
+ "dirs 1.0.5",
+ "winapi",
 ]

 [[package]]
 name = "termcolor"
-version = "1.0.5"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f"
 dependencies = [
- "wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-util",
 ]

 [[package]]
 name = "textwrap"
 version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
 dependencies = [
- "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-width",
 ]

 [[package]]
 name = "thread-id"
 version = "3.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7fbf4c9d56b320106cd64fd024dadfa0be7cb4706725fc44a7d7ce952d820c1"
 dependencies = [
- "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
- "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "redox_syscall",
+ "winapi",
 ]

 [[package]]
 name = "thread_local"
-version = "0.3.6"
+version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
 dependencies = [
- "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0",
 ]

 [[package]]
 name = "time"
 version = "0.1.42"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f"
 dependencies = [
- "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
- "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "redox_syscall",
+ "winapi",
 ]

 [[package]]
 name = "typenum"
-version = "1.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "ucd-util"
-version = "0.1.3"
+version = "1.11.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d2783fe2d6b8c1101136184eb41be8b1ad379e4657050b8aaff0c79ee7575f9"

 [[package]]
 name = "unic"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e31748f3e294dc6a9243a44686e8155a162af9a11cd56e07c0ebbc530b2a8a87"
 dependencies = [
- "unic-bidi 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-char 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-common 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-emoji 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-idna 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-normal 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-segment 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-bidi",
+ "unic-char",
+ "unic-common",
+ "unic-emoji",
+ "unic-idna",
+ "unic-normal",
+ "unic-segment",
+ "unic-ucd",
 ]

 [[package]]
 name = "unic-bidi"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1356b759fb6a82050666f11dce4b6fe3571781f1449f3ef78074e408d468ec09"
 dependencies = [
- "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-bidi 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "matches",
+ "unic-ucd-bidi",
 ]

 [[package]]
 name = "unic-char"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af25df79bd134107f088ba725d9c470600f16263205d0be36c75e75b020bac0a"
 dependencies = [
- "unic-char-basics 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-char-property 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-char-range 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-char-basics",
+ "unic-char-property",
+ "unic-char-range",
 ]

 [[package]]
 name = "unic-char-basics"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "20e5d239bc6394309225a0c1b13e1d059565ff2cfef1a437aff4a5871fa06c4b"

 [[package]]
 name = "unic-char-property"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8c57a407d9b6fa02b4795eb81c5b6652060a15a7903ea981f3d723e6c0be221"
 dependencies = [
- "unic-char-range 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-char-range",
 ]

 [[package]]
 name = "unic-char-range"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0398022d5f700414f6b899e10b8348231abf9173fa93144cbc1a43b9793c1fbc"

 [[package]]
 name = "unic-common"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "80d7ff825a6a654ee85a63e80f92f054f904f21e7d12da4e22f9834a4aaa35bc"

 [[package]]
 name = "unic-emoji"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "74193f32f7966ad20b819e70e29c6f1ac8c386692a9d5e90078eef80ea008bfb"
 dependencies = [
- "unic-emoji-char 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-emoji-char",
 ]

 [[package]]
 name = "unic-emoji-char"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b07221e68897210270a38bde4babb655869637af0f69407f96053a34f76494d"
 dependencies = [
- "unic-char-property 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-char-range 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-char-property",
+ "unic-char-range",
+ "unic-ucd-version",
 ]

 [[package]]
 name = "unic-idna"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "621e9cf526f2094d2c2ced579766458a92f8f422d6bb934c503ba1a95823a62d"
 dependencies = [
- "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-idna-mapping 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-idna-punycode 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-normal 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-bidi 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-normal 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "matches",
+ "unic-idna-mapping",
+ "unic-idna-punycode",
+ "unic-normal",
+ "unic-ucd-bidi",
+ "unic-ucd-normal",
+ "unic-ucd-version",
 ]

 [[package]]
 name = "unic-idna-mapping"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4de70fd4e5331537347a50a0dbc938efb1f127c9f6e5efec980fc90585aa1343"
 dependencies = [
- "unic-char-property 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-char-range 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-char-property",
+ "unic-char-range",
+ "unic-ucd-version",
 ]

 [[package]]
 name = "unic-idna-punycode"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06feaedcbf9f1fc259144d833c0d630b8b15207b0486ab817d29258bc89f2f8a"

 [[package]]
 name = "unic-normal"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f09d64d33589a94628bc2aeb037f35c2e25f3f049c7348b5aa5580b48e6bba62"
 dependencies = [
- "unic-ucd-normal 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-ucd-normal",
 ]

 [[package]]
 name = "unic-segment"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e4ed5d26be57f84f176157270c112ef57b86debac9cd21daaabbe56db0f88f23"
 dependencies = [
- "unic-ucd-segment 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-ucd-segment",
 ]

 [[package]]
 name = "unic-ucd"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "625b18f7601e1127504a20ae731dc3c7826d0e86d5f7fe3434f8137669240efd"
 dependencies = [
- "unic-ucd-age 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-bidi 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-block 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-case 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-category 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-common 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-hangul 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-ident 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-name 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-name_aliases 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-normal 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-segment 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-ucd-age",
+ "unic-ucd-bidi",
+ "unic-ucd-block",
+ "unic-ucd-case",
+ "unic-ucd-category",
+ "unic-ucd-common",
+ "unic-ucd-hangul",
+ "unic-ucd-ident",
+ "unic-ucd-name",
+ "unic-ucd-name_aliases",
+ "unic-ucd-normal",
+ "unic-ucd-segment",
+ "unic-ucd-version",
 ]

 [[package]]
 name = "unic-ucd-age"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c8cfdfe71af46b871dc6af2c24fcd360e2f3392ee4c5111877f2947f311671c"
 dependencies = [
- "unic-char-property 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-char-range 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-char-property",
+ "unic-char-range",
+ "unic-ucd-version",
 ]

 [[package]]
 name = "unic-ucd-bidi"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d1d568b51222484e1f8209ce48caa6b430bf352962b877d592c29ab31fb53d8c"
 dependencies = [
- "unic-char-property 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-char-range 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-char-property",
+ "unic-char-range",
+ "unic-ucd-version",
 ]

 [[package]]
 name = "unic-ucd-block"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6b2a16f2d7ecd25325a1053ca5a66e7fa1b68911a65c5e97f8d2e1b236b6f1d7"
 dependencies = [
- "unic-char-property 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-char-range 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-char-property",
+ "unic-char-range",
+ "unic-ucd-version",
 ]

 [[package]]
 name = "unic-ucd-case"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d98d6246a79bac6cf66beee01422bda7c882e11d837fa4969bfaaba5fdea6d3"
 dependencies = [
- "unic-char-property 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-char-range 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-char-property",
+ "unic-char-range",
+ "unic-ucd-version",
 ]

 [[package]]
 name = "unic-ucd-category"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b8d4591f5fcfe1bd4453baaf803c40e1b1e69ff8455c47620440b46efef91c0"
 dependencies = [
- "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-char-property 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-char-range 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "matches",
+ "unic-char-property",
+ "unic-char-range",
+ "unic-ucd-version",
 ]

 [[package]]
 name = "unic-ucd-common"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e9b78b910beafa1aae5c59bf00877c6cece1c5db28a1241ad801e86cecdff4ad"
 dependencies = [
- "unic-char-property 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-char-range 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-char-property",
+ "unic-char-range",
+ "unic-ucd-version",
 ]

 [[package]]
 name = "unic-ucd-hangul"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eb1dc690e19010e1523edb9713224cba5ef55b54894fe33424439ec9a40c0054"
 dependencies = [
- "unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-ucd-version",
 ]

 [[package]]
 name = "unic-ucd-ident"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e230a37c0381caa9219d67cf063aa3a375ffed5bf541a452db16e744bdab6987"
 dependencies = [
- "unic-char-property 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-char-range 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-char-property",
+ "unic-char-range",
+ "unic-ucd-version",
 ]

 [[package]]
 name = "unic-ucd-name"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8fc55a45b2531089dc1773bf60c1f104b38e434b774ffc37b9c29a9b0f492e"
 dependencies = [
- "unic-char-property 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-hangul 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-char-property",
+ "unic-ucd-hangul",
+ "unic-ucd-version",
 ]

 [[package]]
 name = "unic-ucd-name_aliases"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6b7674212643087699ba247a63dd05f1204c7e4880ec9342e545a7cffcc6a46f"
 dependencies = [
- "unic-char-property 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-char-property",
+ "unic-ucd-version",
 ]

 [[package]]
 name = "unic-ucd-normal"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "86aed873b8202d22b13859dda5fe7c001d271412c31d411fd9b827e030569410"
 dependencies = [
- "unic-char-property 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-char-range 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-category 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-hangul 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-char-property",
+ "unic-char-range",
+ "unic-ucd-category",
+ "unic-ucd-hangul",
+ "unic-ucd-version",
 ]

 [[package]]
 name = "unic-ucd-segment"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2079c122a62205b421f499da10f3ee0f7697f012f55b675e002483c73ea34700"
 dependencies = [
- "unic-char-property 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-char-range 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-char-property",
+ "unic-char-range",
+ "unic-ucd-version",
 ]

 [[package]]
 name = "unic-ucd-version"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96bd2f2237fe450fcd0a1d2f5f4e91711124f7857ba2e964247776ebeeb7b0c4"
 dependencies = [
- "unic-common 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unic-common",
 ]

 [[package]]
 name = "unicode-casing"
 version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "623f59e6af2a98bdafeb93fa277ac8e1e40440973001ca15cf4ae1541cd16d56"

 [[package]]
 name = "unicode-normalization"
-version = "0.1.8"
+version = "0.1.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4"
 dependencies = [
- "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "smallvec",
 ]

 [[package]]
 name = "unicode-segmentation"
-version = "1.3.0"
+version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0"

 [[package]]
 name = "unicode-width"
-version = "0.1.5"
+version = "0.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479"

 [[package]]
 name = "unicode-xid"
 version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"

 [[package]]
-name = "unicode_categories"
-version = "0.1.1"
+name = "unicode-xid"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"

 [[package]]
 name = "unicode_names2"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "unreachable"
-version = "1.0.0"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
-]
+checksum = "a7a928b876ff873d4a0ac966acce72423879dd86afcf190017aa700207188078"

 [[package]]
-name = "utf8-ranges"
-version = "1.0.3"
+name = "utf8parse"
+version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8772a4ccbb4e89959023bc5b7cb8623a795caa7092d99f3aa9501b9484d4557d"

 [[package]]
-name = "utf8parse"
-version = "0.1.1"
+name = "vcpkg"
+version = "0.2.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3fc439f2794e98976c88a2a2dafce96b930fe8010b0a256b3c2199a773933168"

 [[package]]
 name = "vec_map"
 version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a"

 [[package]]
 name = "version_check"
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd"

 [[package]]
 name = "void"
 version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
+
+[[package]]
+name = "volatile"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6af0edf5b4faacc31fc51159244d78d65ec580f021afcef7bd53c04aeabc7f29"
+
+[[package]]
+name = "wasi"
+version = "0.9.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"

 [[package]]
 name = "wasm-bindgen"
-version = "0.2.48"
+version = "0.2.58"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5205e9afdf42282b192e2310a5b463a6d1c1d774e30dc3c791ac37ab42d2616c"
 dependencies = [
- "wasm-bindgen-macro 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if",
+ "wasm-bindgen-macro",
 ]

 [[package]]
 name = "wasm-bindgen-backend"
-version = "0.2.48"
+version = "0.2.58"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "11cdb95816290b525b32587d76419facd99662a07e59d3cdb560488a819d9a45"
 dependencies = [
- "bumpalo 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 0.15.39 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen-shared 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bumpalo",
+ "lazy_static 1.4.0",
+ "log",
+ "proc-macro2 1.0.8",
+ "quote 1.0.2",
+ "syn 1.0.14",
+ "wasm-bindgen-shared",
 ]

 [[package]]
 name = "wasm-bindgen-futures"
-version = "0.3.25"
+version = "0.3.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "83420b37346c311b9ed822af41ec2e82839bfe99867ec6c54e2da43b7538771c"
 dependencies = [
- "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)",
- "js-sys 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if",
+ "futures",
+ "js-sys",
+ "wasm-bindgen",
+ "web-sys",
 ]

 [[package]]
 name = "wasm-bindgen-macro"
-version = "0.2.48"
+version = "0.2.58"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "574094772ce6921576fb6f2e3f7497b8a76273b6db092be18fc48a082de09dc3"
 dependencies = [
- "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen-macro-support 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.2",
+ "wasm-bindgen-macro-support",
 ]

 [[package]]
 name = "wasm-bindgen-macro-support"
-version = "0.2.48"
+version = "0.2.58"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e85031354f25eaebe78bb7db1c3d86140312a911a106b2e29f9cc440ce3e7668"
 dependencies = [
- "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 0.15.39 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen-backend 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen-shared 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.8",
+ "quote 1.0.2",
+ "syn 1.0.14",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
 ]

 [[package]]
 name = "wasm-bindgen-shared"
-version = "0.2.48"
+version = "0.2.58"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f5e7e61fc929f4c0dddb748b102ebf9f632e2b8d739f2016542b4de2965a9601"

 [[package]]
 name = "wasm-bindgen-webidl"
-version = "0.2.48"
+version = "0.2.58"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ef012a0d93fc0432df126a8eaf547b2dce25a8ce9212e1d3cbeef5c11157975d"
 dependencies = [
- "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 0.15.39 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen-backend 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)",
- "weedle 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "anyhow",
+ "heck",
+ "log",
+ "proc-macro2 1.0.8",
+ "quote 1.0.2",
+ "syn 1.0.14",
+ "wasm-bindgen-backend",
+ "weedle",
 ]

 [[package]]
 name = "web-sys"
-version = "0.3.25"
+version = "0.3.35"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aaf97caf6aa8c2b1dac90faf0db529d9d63c93846cca4911856f78a83cebf53b"
 dependencies = [
- "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "js-sys 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)",
- "sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen-webidl 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)",
+ "anyhow",
+ "js-sys",
+ "sourcefile",
+ "wasm-bindgen",
+ "wasm-bindgen-webidl",
 ]

 [[package]]
 name = "weedle"
 version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3bb43f70885151e629e2a19ce9e50bd730fd436cfd4b666894c9ce4de9141164"
 dependencies = [
- "nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "nom",
 ]

 [[package]]
 name = "winapi"
-version = "0.2.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "winapi"
-version = "0.3.7"
+version = "0.3.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
 dependencies = [
- "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
 ]

-[[package]]
-name = "winapi-build"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
 [[package]]
 name = "winapi-i686-pc-windows-gnu"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"

 [[package]]
 name = "winapi-util"
-version = "0.1.2"
+version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80"
 dependencies = [
- "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi",
 ]

 [[package]]
 name = "winapi-x86_64-pc-windows-gnu"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "wincolor"
-version = "1.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "wtf8"
-version = "0.0.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "xdg"
-version = "2.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[metadata]
-"checksum aho-corasick 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "81ce3d38065e618af2d7b77e10c5ad9a069859b4be3c2250f674af3840d9c8a5"
-"checksum aho-corasick 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)" = "36b7aa1ccb7d7ea3f437cf025a2ab1c47cc6c1bc9fc84918ff449def12f5e282"
-"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
-"checksum argon2rs 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3f67b0b6a86dae6e67ff4ca2b6201396074996379fba2b92ff649126f37cb392"
-"checksum arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b8d73f9beda665eaa98ab9e4f7442bd4e7de6652587de55b2525e52e29c1b0ba"
-"checksum ascii-canvas 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b385d69402821a1c254533a011a312531cbcc0e3e24f19bbb4747a5a2daf37e2"
-"checksum atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "1803c647a3ec87095e7ae7acfca019e98de5ec9a7d01343f611cf3152ed71a90"
-"checksum autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "22130e92352b948e7e82a49cdb0aa94f2211761117f29e052dd397c1ac33542b"
-"checksum backtrace 0.3.33 (registry+https://github.com/rust-lang/crates.io-index)" = "88fb679bc9af8fa639198790a77f52d345fe13656c08b43afa9424c206b731c6"
-"checksum backtrace-sys 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)" = "82a830b4ef2d1124a711c71d263c5abdc710ef8e907bd508c88be475cebc422b"
-"checksum bincode 1.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "9f04a5e50dc80b3d5d35320889053637d15011aed5e66b66b37ae798c65da6f7"
-"checksum bit-set 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e84c238982c4b1e1ee668d136c510c67a13465279c0cb367ea6baf6310620a80"
-"checksum bit-vec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f59bbe95d4e52a6398ec21238d31577f2b28a9d86807f06ca59d191d8440d0bb"
-"checksum bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d155346769a6855b86399e9bc3814ab343cd3d62c7e985113d46a0ec3c281fd"
-"checksum blake2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "91721a6330935673395a0607df4d49a9cb90ae12d259f1b3e0a3f6e1d486872e"
-"checksum blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400"
-"checksum block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b"
-"checksum block-padding 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "6d4dc3af3ee2e12f3e5d224e5e1e3d73668abbeb69e566d361f7d5563a4fdf09"
-"checksum build_const 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "39092a32794787acd8525ee150305ff051b0aa6cc2abaf193924f5ab05425f39"
-"checksum bumpalo 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2cd43d82f27d68911e6ee11ee791fb248f138f5d69424dc02e098d4f152b0b05"
-"checksum byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7"
-"checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5"
-"checksum caseless 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "808dab3318747be122cb31d36de18d4d1c81277a76f8332a02b81a3d73463d7f"
-"checksum cc 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)" = "39f75544d7bbaf57560d2168f28fd649ff9c76153874db88bdbdfd839b1a7e7d"
-"checksum cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "b486ce3ccf7ffd79fdeb678eac06a9e6c09fc88d33836340becb8fffe87c5e33"
-"checksum chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "45912881121cb26fad7c38c17ba7daa18764771836b34fab7d3fbd93ed633878"
-"checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9"
-"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
-"checksum constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8ff012e225ce166d4422e0e78419d901719760f62ae2b7969ca6b564d1b54a9e"
-"checksum cpython 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b489034e723e7f5109fecd19b719e664f89ef925be785885252469e9822fa940"
-"checksum crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d663548de7f5cca343f1e0a48d14dcfb0e9eb4e079ec58883b7251539fa10aeb"
-"checksum crypto-mac 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5"
-"checksum diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "3c2b69f912779fbb121ceb775d74d51e915af17aaebc38d28a592843a2dd0a3a"
-"checksum digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5"
-"checksum dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3fd78930633bd1c6e35c4b42b1df7b0cbc6bc191146e512bb3bedf243fcc3901"
-"checksum docopt 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7f525a586d310c87df72ebcd98009e57f1cc030c8c268305287a476beb653969"
-"checksum either 1.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5527cfe0d098f36e3f8839852688e63c8fff1c90b2b405aef730615f9a7bcf7b"
-"checksum ena 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f56c93cc076508c549d9bb747f79aa9b4eb098be7b8cad8830c3137ef52d1e00"
-"checksum env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)" = "15b0a4d2e39f8420210be8b27eeda28029729e2fd4291019455016c348240c38"
-"checksum failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "795bd83d3abeb9220f257e597aa0080a508b27533824adf336529648f6abf7e2"
-"checksum failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "ea1063915fd7ef4309e222a5a07cf9c319fb9c7836b1f89b85458672dbb127e1"
-"checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed"
-"checksum fixedbitset 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "86d4de0081402f5e88cdac65c8dcdcc73118c1a7a465e2a05f0da05843a8ea33"
-"checksum flame 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1fc2706461e1ee94f55cab2ed2e3d34ae9536cfa830358ef80acff1a3dacab30"
-"checksum flamer 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f2add1a5e84b1ed7b5d00cdc21789a28e0a8f4e427b677313c773880ba3c4dac"
-"checksum flamescope 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6e3e6aee625a28b97be4308bccc2eb5f81d9ec606b3f29e13c0f459ce20dd136"
-"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"
-"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
-"checksum futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "45dc39533a6cae6da2b56da48edae506bb767ec07370f86f70fc062e9d435869"
-"checksum generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec"
-"checksum heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205"
-"checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77"
-"checksum hexf-parse 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "79296f72d53a89096cbc9a88c9547ee8dfe793388674620e2207593d370550ac"
-"checksum humantime 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3ca7e5f2e110db35f93b837c81797f3714500b81d517bf20c431b16d3ca4f114"
-"checksum indexmap 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7e81a7c05f79578dbc15793d8b619db9ba32b4577003ef3af1a91c416798c58d"
-"checksum itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5b8467d9c1cebe26feb08c640139247fac215782d35371ade9a2136ed6085358"
-"checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f"
-"checksum js-sys 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)" = "da3ea71161651a4cd97d999b2da139109c537b15ab33abc8ae4ead38deac8a03"
-"checksum keccak 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7"
-"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
-"checksum lalrpop 0.16.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4e2e80bee40b22bca46665b4ef1f3cd88ed0fb043c971407eac17a0712c02572"
-"checksum lalrpop-util 0.16.3 (registry+https://github.com/rust-lang/crates.io-index)" = "33b27d8490dbe1f9704b0088d61e8d46edc10d5673a8829836c6ded26a9912c7"
-"checksum lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "76f033c7ad61445c5b347c7382dd1237847eb1bce590fe50365dcb33d546be73"
-"checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14"
-"checksum lexical 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "93de1b2ed9c7f01aac327bf84542a053b6cd15761defdcfaceb27e1d1b871e74"
-"checksum lexical-core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3f8673fab7063c2cac37d299c8a1a7beb720e78f71500098e4a3c137fdf025bf"
-"checksum libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "d44e80633f007889c7eff624b709ab43c92d708caad982295768a7b13ca3b5eb"
-"checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b"
-"checksum log 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)" = "c275b6ad54070ac2d665eef9197db647b32239c9d244bfb6f041a766d00da5b3"
-"checksum maplit 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "08cbb6b4fef96b6d77bfc40ec491b1690c779e77b05cd9f07f787ed376fd4c43"
-"checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08"
-"checksum md-5 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a18af3dcaf2b0219366cdb4e2af65a6101457b415c3d1a5c71dd9c2b7c77b9c8"
-"checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e"
-"checksum new_debug_unreachable 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f40f005c60db6e03bae699e414c58bf9aa7ea02a2d0b9bfbcf19286cc4c82b30" -"checksum nix 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4dbdc256eaac2e3bd236d93ad999d3479ef775c863dbda3068c4006a92eec51b" -"checksum nix 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6c722bee1037d430d0f8e687bbdbf222f27cc6e4e68d5caf630857bb2b6dbdce" -"checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945" -"checksum nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6" -"checksum num-bigint 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "57450397855d951f1a41305e54851b1a7b8f5d2e349543a02a2effe25459f718" -"checksum num-complex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "107b9be86cd2481930688277b675b0114578227f034674726605b8a482d8baf8" -"checksum num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "e83d528d2677f0518c570baf2b7abdcf0cd2d248860b68507bdcb3e91d4c0cea" -"checksum num-rational 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4e96f040177bb3da242b5b1ecf3f54b5d5af3efbbfb18608977a5d2767b22f10" -"checksum num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0b3a5d7cc97d6d30d8b9bc8fa19bf45349ffe46241e8816f50f62f6d6aaabee1" -"checksum opaque-debug 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "93f5bb2e8e8dec81642920ccff6b61f1eb94fa3020c5a325c9851ff604152409" -"checksum ordermap 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a86ed3f5f244b372d6b1a00b72ef7f8876d0bc6a78a4c9985c53614041512063" -"checksum petgraph 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3659d1ee90221741f65dd128d9998311b0e40c5d3c23a62445938214abce4f" -"checksum phf_generator 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)" = "09364cc93c159b8b06b1f4dd8a4398984503483891b0c26b867cf431fb132662" -"checksum phf_shared 0.7.24 (registry+https://github.com/rust-lang/crates.io-index)" = "234f71a15de2288bcb7e3b6515828d22af7ec8598ee6d24c3b526fa0a80b67a0" -"checksum precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" -"checksum proc-macro-hack 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)" = "982a35d1194084ba319d65c4a68d24ca28f5fdb5b8bc20899e4eef8641ea5178" -"checksum proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)" = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -"checksum pwd 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5dd32d8bece608e144ca20251e714ed107cdecdabb20c2d383cfc687825106a5" -"checksum python3-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "61e4aac43f833fd637e429506cb2ac9d7df672c4b68f2eaaa163649b7fdc0444" -"checksum quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9274b940887ce9addde99c4eee6b5c44cc494b182b97e73dc8ffdcb3397fd3f0" -"checksum quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -"checksum rand 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c618c47cd3ebd209790115ab837de41425723956ad3ce2e6a7f09890947cacb9" -"checksum rand 
0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -"checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -"checksum rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d0e7a549d590831370895ab7ba4ea0c1b6b011d106b5ff2da6eee112615e6dc0" -"checksum rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" -"checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -"checksum rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -"checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -"checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -"checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" -"checksum redox_users 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3fe5204c3a17e97dde73f285d49be585df59ed84b50a872baf416e73b62c3828" -"checksum regex 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "9329abc99e39129fcceabd24cf5d85b4671ef7c29c50e972bc5afe32438ec384" -"checksum regex 1.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "d9d8297cc20bbb6184f8b45ff61c8ee6a9ac56c156cec8e38c3e5084773c44ad" -"checksum regex-syntax 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7d707a4fa2637f2dca2ef9fd02225ec7661fe01a53623c1e6515b6916511f7a7" -"checksum regex-syntax 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)" = "9b01330cce219c1c6b2e209e5ed64ccd587ae5c67bed91c0b49eecf02ae40e21" -"checksum rustc-demangle 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)" = "a7f4dccf6f4891ebcc0c39f9b6eb1a83b9bf5d747cb439ec6fba4f3b977038af" -"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -"checksum rustc_version_runtime 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6de8ecd7fad7731f306f69b6e10ec5a3178c61e464dcc06979427aa4cc891145" -"checksum rustyline 4.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0f47ea1ceb347d2deae482d655dc8eef4bd82363d3329baffa3818bd76fea48b" -"checksum ryu 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "b96a9549dc8d48f2c283938303c4b5a77aa29bfbc5b54b084fb1630408899a8f" -"checksum ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c92464b447c0ee8c4fb3824ecc8383b81717b9f1e74ba2e72540aef7b9f82997" -"checksum scoped_threadpool 0.1.9 
(registry+https://github.com/rust-lang/crates.io-index)" = "1d51f5df5af43ab3f1360b429fa5e0152ac5ce8c0bd6485cae490332e96846a8" -"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -"checksum serde 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)" = "d46b3dfedb19360a74316866cef04687cd4d6a70df8e6a506c63512790769b72" -"checksum serde-wasm-bindgen 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7ee6f12f7ed0e7ad2e55200da37dbabc2cadeb942355c5b629aa3771f5ac5636" -"checksum serde_derive 1.0.97 (registry+https://github.com/rust-lang/crates.io-index)" = "c22a0820adfe2f257b098714323563dd06426502abbbce4f51b72ef544c5027f" -"checksum serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)" = "051c49229f282f7c6f3813f8286cc1e3323e8051823fce42c7ea80fe13521704" -"checksum sha-1 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "23962131a91661d643c98940b20fcaffe62d776a823247be80a48fcb8b6fce68" -"checksum sha2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b4d8bfd0e469f417657573d8451fb33d16cfe0989359b93baf3a1ffc639543d" -"checksum sha3 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dd26bc0e7a2e3a7c959bc494caf58b72ee0c71d67704e9520f736ca7e4853ecf" -"checksum siphasher 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0b8de496cf83d4ed58b6be86c3a275b8602f6ffe98d3024a869e124147a9a3ac" -"checksum smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "ab606a9c5e214920bb66c458cd7be8ef094f813f20fe77a54cc7dbfff220d4b7" -"checksum sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4bf77cb82ba8453b42b6ae1d692e4cdc92f9a47beaf89a847c8be83f4e328ad3" -"checksum stackvector 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "1c4725650978235083241fab0fdc8e694c3de37821524e7534a1a9061d1068af" -"checksum static_assertions 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "c19be23126415861cb3a23e501d34a708f7f9b2183c5252d690941c2e69199d5" -"checksum statrs 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "10102ac8d55e35db2b3fafc26f81ba8647da2e15879ab686a67e6d19af2685e8" -"checksum string_cache 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "25d70109977172b127fe834e5449e5ab1740b9ba49fa18a2020f509174f25423" -"checksum string_cache_codegen 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1eea1eee654ef80933142157fdad9dd8bc43cf7c74e999e369263496f04ff4da" -"checksum string_cache_shared 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b1884d1bc09741d466d9b14e6d37ac89d6909cbcac41dd9ae982d4d063bbedfc" -"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" -"checksum strsim 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "032c03039aae92b350aad2e3779c352e104d919cb192ba2fabbd7b831ce4f0f6" -"checksum subtle 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" -"checksum syn 0.15.39 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d960b829a55e56db167e861ddb43602c003c7be0bee1d345021703fac2fb7c" -"checksum synstructure 0.10.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "02353edf96d6e4dc81aea2d8490a7e9db177bf8acb0e951c24940bf866cb313f" -"checksum term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "fa63644f74ce96fbeb9b794f66aff2a52d601cbd5e80f4b97123e3899f4570f1" -"checksum termcolor 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "96d6098003bde162e4277c70665bd87c326f5a0c3f3fbfb285787fa482d54e6e" -"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -"checksum thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c7fbf4c9d56b320106cd64fd024dadfa0be7cb4706725fc44a7d7ce952d820c1" -"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" -"checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" -"checksum typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "612d636f949607bdf9b123b4a6f6d966dedf3ff669f7f045890d3a4a73948169" -"checksum ucd-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "535c204ee4d8434478593480b8f86ab45ec9aae0e83c568ca81abf0fd0e88f86" -"checksum unic 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e31748f3e294dc6a9243a44686e8155a162af9a11cd56e07c0ebbc530b2a8a87" -"checksum unic-bidi 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1356b759fb6a82050666f11dce4b6fe3571781f1449f3ef78074e408d468ec09" -"checksum unic-char 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "af25df79bd134107f088ba725d9c470600f16263205d0be36c75e75b020bac0a" -"checksum unic-char-basics 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "20e5d239bc6394309225a0c1b13e1d059565ff2cfef1a437aff4a5871fa06c4b" -"checksum unic-char-property 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a8c57a407d9b6fa02b4795eb81c5b6652060a15a7903ea981f3d723e6c0be221" -"checksum unic-char-range 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0398022d5f700414f6b899e10b8348231abf9173fa93144cbc1a43b9793c1fbc" -"checksum unic-common 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "80d7ff825a6a654ee85a63e80f92f054f904f21e7d12da4e22f9834a4aaa35bc" -"checksum unic-emoji 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "74193f32f7966ad20b819e70e29c6f1ac8c386692a9d5e90078eef80ea008bfb" -"checksum unic-emoji-char 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0b07221e68897210270a38bde4babb655869637af0f69407f96053a34f76494d" -"checksum unic-idna 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "621e9cf526f2094d2c2ced579766458a92f8f422d6bb934c503ba1a95823a62d" -"checksum unic-idna-mapping 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4de70fd4e5331537347a50a0dbc938efb1f127c9f6e5efec980fc90585aa1343" -"checksum unic-idna-punycode 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "06feaedcbf9f1fc259144d833c0d630b8b15207b0486ab817d29258bc89f2f8a" -"checksum unic-normal 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f09d64d33589a94628bc2aeb037f35c2e25f3f049c7348b5aa5580b48e6bba62" -"checksum unic-segment 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e4ed5d26be57f84f176157270c112ef57b86debac9cd21daaabbe56db0f88f23" -"checksum unic-ucd 
0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "625b18f7601e1127504a20ae731dc3c7826d0e86d5f7fe3434f8137669240efd" -"checksum unic-ucd-age 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6c8cfdfe71af46b871dc6af2c24fcd360e2f3392ee4c5111877f2947f311671c" -"checksum unic-ucd-bidi 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d1d568b51222484e1f8209ce48caa6b430bf352962b877d592c29ab31fb53d8c" -"checksum unic-ucd-block 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6b2a16f2d7ecd25325a1053ca5a66e7fa1b68911a65c5e97f8d2e1b236b6f1d7" -"checksum unic-ucd-case 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d98d6246a79bac6cf66beee01422bda7c882e11d837fa4969bfaaba5fdea6d3" -"checksum unic-ucd-category 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1b8d4591f5fcfe1bd4453baaf803c40e1b1e69ff8455c47620440b46efef91c0" -"checksum unic-ucd-common 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e9b78b910beafa1aae5c59bf00877c6cece1c5db28a1241ad801e86cecdff4ad" -"checksum unic-ucd-hangul 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "eb1dc690e19010e1523edb9713224cba5ef55b54894fe33424439ec9a40c0054" -"checksum unic-ucd-ident 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e230a37c0381caa9219d67cf063aa3a375ffed5bf541a452db16e744bdab6987" -"checksum unic-ucd-name 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c8fc55a45b2531089dc1773bf60c1f104b38e434b774ffc37b9c29a9b0f492e" -"checksum unic-ucd-name_aliases 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6b7674212643087699ba247a63dd05f1204c7e4880ec9342e545a7cffcc6a46f" -"checksum unic-ucd-normal 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "86aed873b8202d22b13859dda5fe7c001d271412c31d411fd9b827e030569410" -"checksum unic-ucd-segment 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2079c122a62205b421f499da10f3ee0f7697f012f55b675e002483c73ea34700" -"checksum unic-ucd-version 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "96bd2f2237fe450fcd0a1d2f5f4e91711124f7857ba2e964247776ebeeb7b0c4" -"checksum unicode-casing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "623f59e6af2a98bdafeb93fa277ac8e1e40440973001ca15cf4ae1541cd16d56" -"checksum unicode-normalization 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "141339a08b982d942be2ca06ff8b076563cbe223d1befd5450716790d44e2426" -"checksum unicode-segmentation 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1967f4cdfc355b37fd76d2a954fb2ed3871034eb4f26d60537d88795cfc332a9" -"checksum unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "882386231c45df4700b275c7ff55b6f3698780a650026380e72dabe76fa46526" -"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" -"checksum unicode_categories 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" -"checksum unicode_names2 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0dc6c5da0c8d7200f9488cc346bd30ba62bcd9f79ef937ea6573132e3d507df9" -"checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" -"checksum utf8-ranges 1.0.3 
(registry+https://github.com/rust-lang/crates.io-index)" = "9d50aa7650df78abf942826607c62468ce18d9019673d4a2ebe1865dbb96ffde" -"checksum utf8parse 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8772a4ccbb4e89959023bc5b7cb8623a795caa7092d99f3aa9501b9484d4557d" -"checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" -"checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" -"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" -"checksum wasm-bindgen 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)" = "4de97fa1806bb1a99904216f6ac5e0c050dc4f8c676dc98775047c38e5c01b55" -"checksum wasm-bindgen-backend 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)" = "5d82c170ef9f5b2c63ad4460dfcee93f3ec04a9a36a4cc20bc973c39e59ab8e3" -"checksum wasm-bindgen-futures 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)" = "73c25810ee684c909488c214f55abcbc560beb62146d352b9588519e73c2fed9" -"checksum wasm-bindgen-macro 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)" = "f07d50f74bf7a738304f6b8157f4a581e1512cd9e9cdb5baad8c31bbe8ffd81d" -"checksum wasm-bindgen-macro-support 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)" = "95cf8fe77e45ba5f91bc8f3da0c3aa5d464b3d8ed85d84f4d4c7cc106436b1d7" -"checksum wasm-bindgen-shared 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)" = "d9c2d4d4756b2e46d3a5422e06277d02e4d3e1d62d138b76a4c681e925743623" -"checksum wasm-bindgen-webidl 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)" = "24e47859b4eba3d3b9a5c2c299f9d6f8d0b613671315f6f0c5c7f835e524b36a" -"checksum web-sys 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)" = "86d515d2f713d3a6ab198031d2181b7540f8e319e4637ec2d4a41a208335ef29" -"checksum weedle 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3bb43f70885151e629e2a19ce9e50bd730fd436cfd4b666894c9ce4de9141164" -"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" -"checksum winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f10e386af2b13e47c89e7236a7a14a086791a2b88ebad6df9bf42040195cf770" -"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" -"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7168bab6e1daee33b4557efd0e95d5ca70a03706d39fa5f3fe7a236f584b03c9" -"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -"checksum wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "561ed901ae465d6185fa7864d63fbd5720d0ef718366c9a4dc83cf6170d7e9ba" -"checksum wtf8 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d6b9309a86639c488a8eb2b5331cb5127cc9feb0a94a0db4b5d1ab5b84977956" -"checksum xdg 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"d089681aa106a86fade1b0128fb5daf07d5867a509ab036d99988dec80429a57" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/Cargo.toml b/Cargo.toml index 9a163b07a6..d26f57e996 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rustpython" -version = "0.1.0" +version = "0.1.1" authors = ["RustPython Team"] edition = "2018" description = "A python interpreter written in rust." @@ -15,24 +15,24 @@ name = "bench" path = "./benchmarks/bench.rs" [features] -default = ["rustpython-vm/use-proc-macro-hack"] flame-it = ["rustpython-vm/flame-it", "flame", "flamescope"] freeze-stdlib = ["rustpython-vm/freeze-stdlib"] [dependencies] -log="0.4.1" -env_logger="0.5.10" -clap = "2.31.2" -rustpython-compiler = {path = "compiler", version = "0.1.0"} -rustpython-parser = {path = "parser", version = "0.1.0"} -rustpython-vm = {path = "vm", version = "0.1.0"} -xdg = "2.2.0" +log = "0.4" +env_logger = "0.7" +clap = "2.33" +rustpython-compiler = {path = "compiler", version = "0.1.1"} +rustpython-parser = {path = "parser", version = "0.1.1"} +rustpython-vm = {path = "vm", version = "0.1.1"} +dirs = "2.0" +num-traits = "0.2.8" flame = { version = "0.2", optional = true } flamescope = { version = "0.1", optional = true } -[target.'cfg(not(target_os = "redox"))'.dependencies] -rustyline = "4.1.0" +[target.'cfg(not(target_os = "wasi"))'.dependencies] +rustyline = "6.0" [dev-dependencies.cpython] @@ -41,3 +41,13 @@ version = "0.2" [[bin]] name = "rustpython" path = "src/main.rs" + +[patch.crates-io] +# REDOX START, Uncommment when you want to compile/check with redoxer +# time = { git = "https://gitlab.redox-os.org/redox-os/time.git", branch = "redox-unix" } +# nix = { git = "https://github.com/AdminXVII/nix", branch = "add-redox-support" } +# # following patches are just waiting on a new version to be released to crates.io +# socket2 = { git = "https://github.com/alexcrichton/socket2-rs" } +# rustyline = { git = "https://github.com/kkawakam/rustyline" } +# libc = { git = "https://github.com/rust-lang/libc" } +# REDOX END diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 9fd818536e..2fdcde73e3 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -1,42 +1,60 @@ # RustPython Development Guide and Tips -## Code organization +RustPython attracts developers with interest and experience in Rust, Python, +or WebAssembly. Whether you are familiar with Rust, Python, or +WebAssembly, the goal of this Development Guide is to give you the basics to +get set up for developing RustPython and contributing to this project. -- `bytecode/src`: python bytecode representation in rust structures -- `compiler/src`: python compilation to bytecode -- `parser/src`: python lexing, parsing and ast -- `Lib`: Carefully selected / copied files from CPython sourcecode. This is - the python side of the standard library. -- `vm/src`: python virtual machine - - `builtins.rs`: Builtin functions - - `compile.rs`: the python compiler from ast to bytecode - - `obj`: python builtin types - - `stdlib`: Standard library parts implemented in rust. -- `src`: using the other subcrates to bring rustpython to life. 
-- `docs`: documentation (work in progress) -- `py_code_object`: CPython bytecode to rustpython bytecode converter (work in - progress) -- `wasm`: Binary crate and resources for WebAssembly build -- `tests`: integration test snippets +The contents of the Development Guide include: + +- [Setting up a development environment](#setting-up-a-development-environment) +- [Code style](#code-style) +- [Testing](#testing) +- [Profiling](#profiling) +- [Code organization](#code-organization) +- [Understanding internals](#understanding-internals) +- [Questions](#questions) + +## Setting up a development environment + +RustPython requires the following: + +- The latest stable version of Rust (e.g. 1.38.0 as of Oct 1st, 2019) + - To check Rust version: `rustc --version` + - If you have `rustup` on your system, run the following to update to the + latest stable version: `rustup update stable` + - If you do not have Rust installed, use [rustup](https://rustup.rs/) to + do so. +- CPython version 3.7.4 or higher + - CPython can be installed by your operating system's package manager, + from the [Python website](https://www.python.org/downloads/), or + using a third-party distribution, such as + [Anaconda](https://www.anaconda.com/distribution/). +- [Optional] The Python package `pytest` is used for testing Python code + snippets. To install, enter `python3 -m pip install pytest`. ## Code style -The code style used is the default +The Rust code style used is the default [rustfmt](https://github.com/rust-lang/rustfmt) codestyle. Please format your code accordingly. We also use [clippy](https://github.com/rust-lang/rust-clippy) -to detect rust code issues. +to detect rust code issues. + +Python code should follow the +[PEP 8](https://www.python.org/dev/peps/pep-0008/) style. We also use +[flake8](http://flake8.pycqa.org/en/latest/) to check Python code style. ## Testing -To test rustpython, there is a collection of python snippets located in the -`tests/snippets` directory. To run those tests do the following: +To test RustPython's functionality, a collection of Python snippets is located +in the `tests/snippets` directory and can be run using `pytest`: ```shell $ cd tests $ pytest -v ``` -There also are some unit tests, you can run those with cargo: +Rust unit tests can be run with `cargo`: ```shell $ cargo test --all @@ -44,17 +62,106 @@ $ cargo test --all ## Profiling -To profile rustpython, simply build in release mode with the `flame-it` feature. -This will generate a file `flamescope.json`, which you can then view at +To profile RustPython, build it in `release` mode with the `flame-it` feature. +This will generate a file `flamescope.json`, which can be viewed at https://speedscope.app. -```sh +```shell $ cargo run --release --features flame-it script.py $ cat flamescope.json {} ``` -You can also pass the `--output-file` option to choose which file to output to -(or stdout if you specify `-`), and the `--output-format` option to choose if -you want to output in the speedscope json format (default), text, or a raw html -viewer (currently broken). +You can specify an output file other than the default by using the +`--output-file` option (or stdout if you specify `-`). +The `--output-format` option determines the format of the output file: +the speedscope json format (default), text, or raw html. The raw html viewer +is currently broken, and we welcome a PR to fix it. + +## Code organization + +Understanding a new codebase takes time.
Here's a brief view of the +repository's structure: + +- `bytecode/src`: python bytecode representation in rust structures +- `compiler/src`: python compilation to bytecode +- `derive/src`: Rust language extensions and macros specific to rustpython +- `parser/src`: python lexing, parsing and ast +- `Lib`: Carefully selected / copied files from CPython sourcecode. This is + the python side of the standard library. +- `vm/src`: python virtual machine + - `builtins.rs`: Builtin functions + - `compile.rs`: the python compiler from ast to bytecode + - `obj`: python builtin types + - `stdlib`: Standard library parts implemented in rust. +- `src`: using the other subcrates to bring rustpython to life. +- `docs`: documentation (work in progress) +- `py_code_object`: CPython bytecode to rustpython bytecode converter (work in + progress) +- `wasm`: Binary crate and resources for WebAssembly build +- `tests`: integration test snippets + +## Understanding Internals + +The RustPython workspace includes the `rustpython` top-level crate. The `Cargo.toml` +file in the root of the repo provides the configuration of the crate, and the +implementation is found in the `src` directory (specifically, +`src/main.rs`). + +The top-level `rustpython` binary depends on several lower-level crates including: + +- `rustpython-parser` (implementation in `parser/src`) +- `rustpython-compiler` (implementation in `compiler/src`) +- `rustpython-vm` (implementation in `vm/src`) + +Together, these crates provide the functions of a programming language and +enable a line of code to go through a series of steps: + +- parse the line of source code into tokens +- determine if the tokens are valid syntax +- create an Abstract Syntax Tree (AST) +- compile the AST into bytecode +- execute the bytecode in the virtual machine (VM). + +### rustpython-parser + +This crate contains the lexer and parser to convert a line of code to +an Abstract Syntax Tree (AST): + +- Lexer: `parser/lexer.rs` converts Python source code into tokens +- Parser: `parser/parser.rs` takes the tokens generated by the lexer and parses + the tokens into an AST (Abstract Syntax Tree) where the nodes of the syntax + tree are Rust structs and enums. + - The Parser relies on `LALRPOP`, a Rust parser generator framework. + - More information on parsers and a tutorial can be found in the + [LALRPOP book](https://lalrpop.github.io/lalrpop/README.html). +- AST: `parser/ast.rs` implements in Rust the Python types and expressions + represented by the AST nodes. + +### rustpython-compiler + +The `rustpython-compiler` crate's purpose is to transform the AST (Abstract Syntax +Tree) to bytecode. The implementation of the compiler is found in the +`compiler/src` directory. The compiler implements Python's peephole optimizer, +symbol table, and streams in Rust. + +The implementation of the bytecode structures in Rust is found in the +`bytecode/src` directory. `bytecode/src/bytecode.rs` contains the representation of +instructions and operations in Rust. Further information about Python's +bytecode instructions can be found in the +[Python documentation](https://docs.python.org/3/library/dis.html#bytecodes). + +### rustpython-vm + +The `rustpython-vm` crate has the important job of running the virtual machine that +executes Python's instructions. The `vm/src` directory contains code to +implement the read and evaluation loop that fetches and dispatches +instructions. A simplified sketch of such a dispatch loop is shown below.
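For intuition only, here is a minimal sketch of the kind of fetch/dispatch loop described above, written in Python. The opcode names and the `(op, arg)` instruction format are invented for this illustration; the real loop is Rust code in `vm/src` operating on `rustpython-bytecode` instructions.

```python
# A toy fetch/dispatch loop. Opcode names and the (op, arg) format are
# made up for illustration; they are not RustPython's actual bytecode.
def run(instructions):
    stack = []  # value stack, analogous to a frame's stack in the VM
    pc = 0      # program counter
    while pc < len(instructions):
        op, arg = instructions[pc]  # fetch the next instruction
        pc += 1
        if op == "LOAD_CONST":      # dispatch on the opcode
            stack.append(arg)
        elif op == "BINARY_ADD":
            right, left = stack.pop(), stack.pop()
            stack.append(left + right)
        elif op == "RETURN_VALUE":
            return stack.pop()
        else:
            raise ValueError("unknown opcode: %s" % op)

# Roughly what the expression `1 + 2` would compile down to:
assert run([("LOAD_CONST", 1), ("LOAD_CONST", 2),
            ("BINARY_ADD", None), ("RETURN_VALUE", None)]) == 3
```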
The `vm/src` directory also contains the implementation of the +Python Standard Library modules in Rust (`vm/src/stdlib`). In Python, +everything can be represented as an object. The `vm/src/obj` directory holds +the Rust code used to represent a Python object and its methods. + +## Questions + +If you have tried these steps and still have a question, please chat with us +on [gitter](https://gitter.im/rustpython/Lobby). diff --git a/Lib/_codecs.py b/Lib/_codecs.py new file mode 100644 index 0000000000..26439b0614 --- /dev/null +++ b/Lib/_codecs.py @@ -0,0 +1,1673 @@ +# Note: +# This *is* now explicitly RPython. +# Please make sure not to break this. + +""" + + _codecs -- Provides access to the codec registry and the builtin + codecs. + + This module should never be imported directly. The standard library + module "codecs" wraps this builtin module for use within Python. + + The codec registry is accessible via: + + register(search_function) -> None + + lookup(encoding) -> (encoder, decoder, stream_reader, stream_writer) + + The builtin Unicode codecs use the following interface: + + <encoding>_encode(Unicode_object[,errors='strict']) -> + (string object, bytes consumed) + + <encoding>_decode(char_buffer_obj[,errors='strict']) -> + (Unicode object, bytes consumed) + + <encoding>_encode() interfaces also accept non-Unicode objects as + input. The objects are then converted to Unicode using + PyUnicode_FromObject() prior to applying the conversion. + + These <encoding>s are available: utf_8, unicode_escape, + raw_unicode_escape, unicode_internal, latin_1, ascii (7-bit), + mbcs (on win32). + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +Copyright (c) Corporation for National Research Initiatives. + +From PyPy v1.0.0 + +""" +#from unicodecodec import * + +__all__ = ['register', 'lookup', 'lookup_error', 'register_error', 'encode', 'decode', + 'latin_1_encode', 'mbcs_decode', 'readbuffer_encode', 'escape_encode', + 'utf_8_decode', 'raw_unicode_escape_decode', 'utf_7_decode', + 'unicode_escape_encode', 'latin_1_decode', 'utf_16_decode', + 'unicode_escape_decode', 'ascii_decode', 'charmap_encode', + 'unicode_internal_encode', 'unicode_internal_decode', 'utf_16_ex_decode', + 'escape_decode', 'charmap_decode', 'utf_7_encode', 'mbcs_encode', + 'ascii_encode', 'utf_16_encode', 'raw_unicode_escape_encode', 'utf_8_encode', + 'utf_16_le_encode', 'utf_16_be_encode', 'utf_16_le_decode', 'utf_16_be_decode',] + +import sys +#/* --- Registry ----------------------------------------------------------- */ +codec_search_path = [] +codec_search_cache = {} +codec_error_registry = {} +codec_need_encodings = [True] + +def register( search_function ): + """register(search_function) + + Register a codec search function. Search functions are expected to take + one argument, the encoding name in all lower case letters, and return + a tuple of functions (encoder, decoder, stream_reader, stream_writer). + """ + + if not callable(search_function): + raise TypeError("argument must be callable") + codec_search_path.append(search_function) + + +def lookup(encoding): + """lookup(encoding) -> (encoder, decoder, stream_reader, stream_writer) + Looks up a codec tuple in the Python codec registry and returns + a tuple of functions.
+ """ + if not isinstance(encoding, str): + raise TypeError("Encoding must be a string") + normalized_encoding = encoding.replace(" ", "-").lower() + result = codec_search_cache.get(normalized_encoding, None) + if not result: + if codec_need_encodings: + import encodings + if len(codec_search_path) == 0: + raise LookupError("no codec search functions registered: can't find encoding") + del codec_need_encodings[:] + for search in codec_search_path: + result = search(normalized_encoding) + if result: + codec_search_cache[normalized_encoding] = result + return result + if not result: + raise LookupError("unknown encoding: %s" % encoding) + return result + + + +def encode(v, encoding=None, errors='strict'): + """encode(obj, [encoding[,errors]]) -> object + + Encodes obj using the codec registered for encoding. encoding defaults + to the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a ValueError. Other possible values are 'ignore', 'replace' and + 'xmlcharrefreplace' as well as any other name registered with + codecs.register_error that can handle ValueErrors. + """ + if encoding == None: + encoding = sys.getdefaultencoding() + if not isinstance(encoding, str): + raise TypeError("Encoding must be a string") + if not isinstance(errors, str): + raise TypeError("Errors must be a string") + codec = lookup(encoding) + res = codec.encode(v, errors) + if not isinstance(res, tuple) or len(res) != 2: + raise TypeError("encoder must return a tuple (object, integer)") + return res[0] + +def decode(obj, encoding=None, errors='strict'): + """decode(obj, [encoding[,errors]]) -> object + + Decodes obj using the codec registered for encoding. encoding defaults + to the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a ValueError. Other possible values are 'ignore' and 'replace' + as well as any other name registerd with codecs.register_error that is + able to handle ValueErrors. + """ + if encoding == None: + encoding = sys.getdefaultencoding() + if not isinstance(encoding, str): + raise TypeError("Encoding must be a string") + if not isinstance(errors, str): + raise TypeError("Errors must be a string") + codec = lookup(encoding) + res = codec.decode(obj, errors) + if not isinstance(res, tuple) or len(res) != 2: + raise TypeError("encoder must return a tuple (object, integer)") + return res[0] + + +def latin_1_encode( obj, errors='strict'): + """None + """ + res = PyUnicode_EncodeLatin1(obj, len(obj), errors) + res = bytes(res) + return res, len(res) +# XXX MBCS codec might involve ctypes ? 
+def mbcs_decode(): + """None + """ + pass + +def readbuffer_encode( obj, errors='strict'): + """None + """ + res = str(obj) + return res, len(res) + +def escape_encode( obj, errors='strict'): + """None + """ + s = repr(obj) + v = s[1:-1] + return v, len(v) + +def utf_8_decode( data, errors='strict', final=False): + """None + """ + consumed = len(data) + if final: + consumed = 0 + res, consumed = PyUnicode_DecodeUTF8Stateful(data, len(data), errors, final) + res = ''.join(res) + return res, consumed + +def raw_unicode_escape_decode( data, errors='strict'): + """None + """ + res = PyUnicode_DecodeRawUnicodeEscape(data, len(data), errors) + res = ''.join(res) + return res, len(res) + +def utf_7_decode( data, errors='strict'): + """None + """ + res = PyUnicode_DecodeUTF7(data, len(data), errors) + res = ''.join(res) + return res, len(res) + +def unicode_escape_encode( obj, errors='strict'): + """None + """ + res = unicodeescape_string(obj, len(obj), 0) + res = bytes(res) + return res, len(res) + +def latin_1_decode( data, errors='strict'): + """None + """ + res = PyUnicode_DecodeLatin1(data, len(data), errors) + res = ''.join(res) + return res, len(res) + +def utf_16_decode( data, errors='strict', final=False): + """None + """ + consumed = len(data) + if final: + consumed = 0 + res, consumed, byteorder = PyUnicode_DecodeUTF16Stateful(data, len(data), errors, 'native', final) + res = ''.join(res) + return res, consumed + +def unicode_escape_decode( data, errors='strict'): + """None + """ + res = PyUnicode_DecodeUnicodeEscape(data, len(data), errors) + res = ''.join(res) + return res, len(res) + + +def ascii_decode( data, errors='strict'): + """None + """ + res = PyUnicode_DecodeASCII(data, len(data), errors) + res = ''.join(res) + return res, len(res) + +def charmap_encode(obj, errors='strict', mapping='latin-1'): + """None + """ + + res = PyUnicode_EncodeCharmap(obj, len(obj), mapping, errors) + res = bytes(res) + return res, len(res) + +if sys.maxunicode == 65535: + unicode_bytes = 2 +else: + unicode_bytes = 4 + +def unicode_internal_encode( obj, errors='strict'): + """None + """ + if type(obj) == str: + p = bytearray() + t = [ord(x) for x in obj] + for i in t: + b = bytearray() + for j in range(unicode_bytes): + b.append(i%256) + i >>= 8 + if sys.byteorder == "big": + b.reverse() + p += b + res = bytes(p) + return res, len(res) + else: + res = "You can do better than this" # XXX make this right + return res, len(res) + +def unicode_internal_decode( unistr, errors='strict'): + """None + """ + if type(unistr) == str: + return unistr, len(unistr) + else: + p = [] + i = 0 + if sys.byteorder == "big": + start = unicode_bytes - 1 + stop = -1 + step = -1 + else: + start = 0 + stop = unicode_bytes + step = 1 + while i < len(unistr)-unicode_bytes+1: + t = 0 + h = 0 + for j in range(start, stop, step): + t += ord(unistr[i+j])<<(h*8) + h += 1 + i += unicode_bytes + p += chr(t) + res = ''.join(p) + return res, len(res) + +def utf_16_ex_decode( data, errors='strict', byteorder=0, final=0): + """None + """ + if byteorder == 0: + bm = 'native' + elif byteorder == -1: + bm = 'little' + else: + bm = 'big' + consumed = len(data) + if final: + consumed = 0 + res, consumed, byteorder = PyUnicode_DecodeUTF16Stateful(data, len(data), errors, bm, final) + res = ''.join(res) + return res, consumed, byteorder + +# XXX needs error messages when the input is invalid +def escape_decode(data, errors='strict'): + """None + """ + l = len(data) + i = 0 + res = [] + while i < l: + + if data[i] == '\\': + i += 1 + if i >= 
l: + raise ValueError("Trailing \\ in string") + else: + if data[i] == '\\': + res += '\\' + elif data[i] == 'n': + res += '\n' + elif data[i] == 't': + res += '\t' + elif data[i] == 'r': + res += '\r' + elif data[i] == 'b': + res += '\b' + elif data[i] == '\'': + res += '\'' + elif data[i] == '\"': + res += '\"' + elif data[i] == 'f': + res += '\f' + elif data[i] == 'a': + res += '\a' + elif data[i] == 'v': + res += '\v' + elif '0' <= data[i] <= '9': + # emulate a strange wrap-around behavior of CPython: + # \400 is the same as \000 because 0400 == 256 + octal = data[i:i+3] + res += chr(int(octal, 8) & 0xFF) + i += 2 + elif data[i] == 'x': + hexa = data[i+1:i+3] + res += chr(int(hexa, 16)) + i += 2 + else: + res += data[i] + i += 1 + res = ''.join(res) + return res, len(res) + +def charmap_decode( data, errors='strict', mapping=None): + """None + """ + res = PyUnicode_DecodeCharmap(data, len(data), mapping, errors) + res = ''.join(res) + return res, len(res) + + +def utf_7_encode( obj, errors='strict'): + """None + """ + res = PyUnicode_EncodeUTF7(obj, len(obj), 0, 0, errors) + res = bytes(res) + return res, len(res) + +def mbcs_encode( obj, errors='strict'): + """None + """ + pass +## return (PyUnicode_EncodeMBCS( +## (obj), +## len(obj), +## errors), +## len(obj)) + + +def ascii_encode( obj, errors='strict'): + """None + """ + res = PyUnicode_EncodeASCII(obj, len(obj), errors) + res = bytes(res) + return res, len(res) + +def utf_16_encode( obj, errors='strict'): + """None + """ + res = PyUnicode_EncodeUTF16(obj, len(obj), errors, 'native') + res = bytes(res) + return res, len(res) + +def raw_unicode_escape_encode( obj, errors='strict'): + """None + """ + res = PyUnicode_EncodeRawUnicodeEscape(obj, len(obj)) + res = bytes(res) + return res, len(res) + +def utf_8_encode( obj, errors='strict'): + """None + """ + res = PyUnicode_EncodeUTF8(obj, len(obj), errors) + res = bytes(res) + return res, len(res) + +def utf_16_le_encode( obj, errors='strict'): + """None + """ + res = PyUnicode_EncodeUTF16(obj, len(obj), errors, 'little') + res = bytes(res) + return res, len(res) + +def utf_16_be_encode( obj, errors='strict'): + """None + """ + res = PyUnicode_EncodeUTF16(obj, len(obj), errors, 'big') + res = bytes(res) + return res, len(res) + +def utf_16_le_decode( data, errors='strict', byteorder=0, final = 0): + """None + """ + consumed = len(data) + if final: + consumed = 0 + res, consumed, byteorder = PyUnicode_DecodeUTF16Stateful(data, len(data), errors, 'little', final) + res = ''.join(res) + return res, consumed + +def utf_16_be_decode( data, errors='strict', byteorder=0, final = 0): + """None + """ + consumed = len(data) + if final: + consumed = 0 + res, consumed, byteorder = PyUnicode_DecodeUTF16Stateful(data, len(data), errors, 'big', final) + res = ''.join(res) + return res, consumed + + +class _PretendCFunction: + def __init__(self, f): + self.__f = f + @property + def _f(self): + # attribute access lets name mangling resolve __f consistently + return self.__f + def __repr__(self): + return repr(self._f) + def __str__(self): + return str(self._f) + def __call__(self, *args, **kwargs): + return self._f(*args, **kwargs) + +_mod = globals() +for exp in __all__: + if exp.endswith('_encode') or exp.endswith('_decode'): + # encodings expect most of these to be builtin functions, so we pretend they are + _mod[exp] = _PretendCFunction(_mod[exp]) +del _mod, exp + + +def strict_errors(exc): + if isinstance(exc, Exception): + raise exc + else: + raise TypeError("codec must pass exception instance") + +def ignore_errors(exc): + if isinstance(exc,
UnicodeEncodeError): + return '', exc.end + elif isinstance(exc, (UnicodeDecodeError, UnicodeTranslateError)): + return '', exc.end + else: + raise TypeError("don't know how to handle %.400s in error callback"%exc) + +Py_UNICODE_REPLACEMENT_CHARACTER = "\ufffd" + +def replace_errors(exc): + if isinstance(exc, UnicodeEncodeError): + return '?'*(exc.end-exc.start), exc.end + elif isinstance(exc, (UnicodeTranslateError, UnicodeDecodeError)): + return Py_UNICODE_REPLACEMENT_CHARACTER*(exc.end-exc.start), exc.end + else: + raise TypeError("don't know how to handle %.400s in error callback"%exc) + +def xmlcharrefreplace_errors(exc): + if isinstance(exc, UnicodeEncodeError): + res = [] + for ch in exc.object[exc.start:exc.end]: + res += '&#' + res += str(ord(ch)) + res += ';' + return ''.join(res), exc.end + else: + raise TypeError("don't know how to handle %.400s in error callback"%type(exc)) + +def backslashreplace_errors(exc): + if isinstance(exc, UnicodeEncodeError): + p = [] + for c in exc.object[exc.start:exc.end]: + p += '\\' + oc = ord(c) + if (oc >= 0x00010000): + p += 'U' + p += "%.8x" % ord(c) + elif (oc >= 0x100): + p += 'u' + p += "%.4x" % ord(c) + else: + p += 'x' + p += "%.2x" % ord(c) + return ''.join(p), exc.end + else: + raise TypeError("don't know how to handle %.400s in error callback"%type(exc)) + + +# ---------------------------------------------------------------------- + +##import sys +##""" Python implementation of CPython's builtin unicode codecs. +## +## Generally the functions in this module take a list of characters and return +## a list of characters. +## +## For use in the PyPy project""" + + +## indicate whether a UTF-7 character is special i.e. cannot be directly +## encoded: +## 0 - not special +## 1 - special +## 2 - whitespace (optional) +## 3 - RFC2152 Set O (optional) + +utf7_special = [ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 2, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 3, 3, 3, 3, 3, 3, 0, 0, 0, 3, 1, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 3, 3, 3, + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 1, 1, +] +unicode_latin1 = [None]*256 + + +def lookup_error(errors): + """lookup_error(errors) -> handler + + Return the error handler for the specified error handling name + or raise a LookupError, if no handler exists under this name. + """ + + try: + err_handler = codec_error_registry[errors] + except KeyError: + raise LookupError("unknown error handler name %s"%errors) + return err_handler + +def register_error(errors, handler): + """register_error(errors, handler) + + Register the specified error handler under the name + errors. handler must be a callable object, that + will be called with an exception instance containing + information about the location of the encoding/decoding + error and must return a (replacement, new position) tuple.
+ """ + if callable(handler): + codec_error_registry[errors] = handler + else: + raise TypeError("handler must be callable") + +register_error("strict", strict_errors) +register_error("ignore", ignore_errors) +register_error("replace", replace_errors) +register_error("xmlcharrefreplace", xmlcharrefreplace_errors) +register_error("backslashreplace", backslashreplace_errors) + +def SPECIAL(c, encodeO, encodeWS): + c = ord(c) + return (c>127 or utf7_special[c] == 1) or \ + (encodeWS and (utf7_special[(c)] == 2)) or \ + (encodeO and (utf7_special[(c)] == 3)) +def B64(n): + return ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"[(n) & 0x3f]) +def B64CHAR(c): + return (c.isalnum() or (c) == '+' or (c) == '/') +def UB64(c): + if (c) == '+' : + return 62 + elif (c) == '/': + return 63 + elif (c) >= 'a': + return ord(c) - 71 + elif (c) >= 'A': + return ord(c) - 65 + else: + return ord(c) + 4 + +def ENCODE( ch, bits) : + out = [] + while (bits >= 6): + out += B64(ch >> (bits-6)) + bits -= 6 + return out, bits + +def PyUnicode_DecodeUTF7(s, size, errors): + + starts = s + errmsg = "" + inShift = 0 + bitsleft = 0 + charsleft = 0 + surrogate = 0 + p = [] + errorHandler = None + exc = None + + if (size == 0): + return str('') + i = 0 + while i < size: + + ch = s[i] + if (inShift): + if ((ch == '-') or not B64CHAR(ch)): + inShift = 0 + i += 1 + + while (bitsleft >= 16): + outCh = ((charsleft) >> (bitsleft-16)) & 0xffff + bitsleft -= 16 + + if (surrogate): + ## We have already generated an error for the high surrogate + ## so let's not bother seeing if the low surrogate is correct or not + surrogate = 0 + elif (0xDC00 <= (outCh) and (outCh) <= 0xDFFF): + ## This is a surrogate pair. Unfortunately we can't represent + ## it in a 16-bit character + surrogate = 1 + msg = "code pairs are not supported" + out, x = unicode_call_errorhandler(errors, 'utf-7', msg, s, i-1, i) + p += out + bitsleft = 0 + break + else: + p += chr(outCh ) + #p += out + if (bitsleft >= 6): +## /* The shift sequence has a partial character in it. If +## bitsleft < 6 then we could just classify it as padding +## but that is not the case here */ + msg = "partial character in shift sequence" + out, x = unicode_call_errorhandler(errors, 'utf-7', msg, s, i-1, i) + +## /* According to RFC2152 the remaining bits should be zero. We +## choose to signal an error/insert a replacement character +## here so indicate the potential of a misencoded character. 
*/ + +## /* On x86, a << b == a << (b%32) so make sure that bitsleft != 0 */ +## if (bitsleft and (charsleft << (sizeof(charsleft) * 8 - bitsleft))): +## raise UnicodeDecodeError, "non-zero padding bits in shift sequence" + if (ch == '-') : + if ((i < size) and (s[i] == '-')) : + p += '-' + inShift = 1 + + elif SPECIAL(ch, 0, 0) : + raise UnicodeDecodeError("unexpected special character") + + else: + p += ch + else: + charsleft = (charsleft << 6) | UB64(ch) + bitsleft += 6 + i += 1 +## /* p, charsleft, bitsleft, surrogate = */ DECODE(p, charsleft, bitsleft, surrogate); + elif ( ch == '+' ): + startinpos = i + i += 1 + if (i 0 + else: + out += chr(ord(ch)) + else: + if (not SPECIAL(ch, encodeSetO, encodeWhiteSpace)): + out += B64((charsleft) << (6-bitsleft)) + charsleft = 0 + bitsleft = 0 +## /* Characters not in the BASE64 set implicitly unshift the sequence +## so no '-' is required, except if the character is itself a '-' */ + if (B64CHAR(ch) or ch == '-'): + out += '-' + inShift = False + out += chr(ord(ch)) + else: + bitsleft += 16 + charsleft = (((charsleft) << 16) | ord(ch)) + p, bitsleft = ENCODE(charsleft, bitsleft) + out += p +## /* If the next character is special then we don't need to terminate +## the shift sequence. If the next character is not a BASE64 character +## or '-' then the shift sequence will be terminated implicitly and we +## don't have to insert a '-'. */ + + if (bitsleft == 0): + if (i + 1 < size): + ch2 = s[i+1] + + if (SPECIAL(ch2, encodeSetO, encodeWhiteSpace)): + pass + elif (B64CHAR(ch2) or ch2 == '-'): + out += '-' + inShift = False + else: + inShift = False + else: + out += '-' + inShift = False + i += 1 + + if (bitsleft): + out += B64(charsleft << (6-bitsleft) ) + out += '-' + + return out + +unicode_empty = '' + +def unicodeescape_string(s, size, quotes): + + p = [] + if (quotes) : + p += 'u' + if (s.find('\'') != -1 and s.find('"') == -1): + p += '"' + else: + p += '\'' + pos = 0 + while (pos < size): + ch = s[pos] + #/* Escape quotes */ + if (quotes and (ch == p[1] or ch == '\\')): + p += '\\' + p += chr(ord(ch)) + pos += 1 + continue + +#ifdef Py_UNICODE_WIDE + #/* Map 21-bit characters to '\U00xxxxxx' */ + elif (ord(ch) >= 0x10000): + p += '\\' + p += 'U' + p += '%08x' % ord(ch) + pos += 1 + continue +#endif + #/* Map UTF-16 surrogate pairs to Unicode \UXXXXXXXX escapes */ + elif (ord(ch) >= 0xD800 and ord(ch) < 0xDC00): + pos += 1 + ch2 = s[pos] + + if (ord(ch2) >= 0xDC00 and ord(ch2) <= 0xDFFF): + ucs = (((ord(ch) & 0x03FF) << 10) | (ord(ch2) & 0x03FF)) + 0x00010000 + p += '\\' + p += 'U' + p += '%08x' % ucs + pos += 1 + continue + + #/* Fall through: isolated surrogates are copied as-is */ + pos -= 1 + + #/* Map 16-bit characters to '\uxxxx' */ + if (ord(ch) >= 256): + p += '\\' + p += 'u' + p += '%04x' % ord(ch) + + #/* Map special whitespace to '\t', '\n', '\r' */ + elif (ch == '\t'): + p += '\\' + p += 't' + + elif (ch == '\n'): + p += '\\' + p += 'n' + + elif (ch == '\r'): + p += '\\' + p += 'r' + + #/* Map non-printable US ASCII to '\xhh' */ + elif (ch < ' ' or ch >= 0x7F) : + p += '\\' + p += 'x' + p += '%02x' % ord(ch) + #/* Copy everything else as-is */ + else: + p += chr(ord(ch)) + pos += 1 + if (quotes): + p += p[1] + return p + +def PyUnicode_DecodeASCII(s, size, errors): + +# /* ASCII is equivalent to the first 128 ordinals in Unicode.
*/ + if (size == 1 and ord(s) < 128) : + return [chr(ord(s))] + if (size == 0): + return [''] #unicode('') + p = [] + pos = 0 + while pos < len(s): + c = s[pos] + if c < 128: + p += chr(c) + pos += 1 + else: + + res = unicode_call_errorhandler( + errors, "ascii", "ordinal not in range(128)", + s, pos, pos+1) + p += res[0] + pos = res[1] + return p + +def PyUnicode_EncodeASCII(p, size, errors): + + return unicode_encode_ucs1(p, size, errors, 128) + +def PyUnicode_AsASCIIString(unistr): + + if not type(unistr) == str: + raise TypeError + return PyUnicode_EncodeASCII(str(unistr), + len(unistr), + None) + +def PyUnicode_DecodeUTF16Stateful(s, size, errors, byteorder='native', final=True): + + bo = 0 #/* assume native ordering by default */ + consumed = 0 + errmsg = "" + + if sys.byteorder == 'little': + ihi = 1 + ilo = 0 + else: + ihi = 0 + ilo = 1 + + + #/* Unpack UTF-16 encoded data */ + +## /* Check for BOM marks (U+FEFF) in the input and adjust current +## byte order setting accordingly. In native mode, the leading BOM +## mark is skipped, in all other modes, it is copied to the output +## stream as-is (giving a ZWNBSP character). */ + q = 0 + p = [] + if byteorder == 'native': + if (size >= 2): + bom = (ord(s[ihi]) << 8) | ord(s[ilo]) +#ifdef BYTEORDER_IS_LITTLE_ENDIAN + if sys.byteorder == 'little': + if (bom == 0xFEFF): + q += 2 + bo = -1 + elif bom == 0xFFFE: + q += 2 + bo = 1 + else: + if bom == 0xFEFF: + q += 2 + bo = 1 + elif bom == 0xFFFE: + q += 2 + bo = -1 + elif byteorder == 'little': + bo = -1 + else: + bo = 1 + + if (size == 0): + return [''], 0, bo + + if (bo == -1): + #/* force LE */ + ihi = 1 + ilo = 0 + + elif (bo == 1): + #/* force BE */ + ihi = 0 + ilo = 1 + + while (q < len(s)): + + #/* remaining bytes at the end? (size should be even) */ + if (len(s)-q<2): + if not final: + break + errmsg = "truncated data" + startinpos = q + endinpos = len(s) + unicode_call_errorhandler(errors, 'utf-16', errmsg, s, startinpos, endinpos, True) +# /* The remaining input chars are ignored if the callback +## chooses to skip the input */ + + ch = (ord(s[q+ihi]) << 8) | ord(s[q+ilo]) + q += 2 + + if (ch < 0xD800 or ch > 0xDFFF): + p += chr(ch) + continue + + #/* UTF-16 code pair: */ + if (q >= len(s)): + errmsg = "unexpected end of data" + startinpos = q-2 + endinpos = len(s) + unicode_call_errorhandler(errors, 'utf-16', errmsg, s, startinpos, endinpos, True) + + if (0xD800 <= ch and ch <= 0xDBFF): + ch2 = (ord(s[q+ihi]) << 8) | ord(s[q+ilo]) + q += 2 + if (0xDC00 <= ch2 and ch2 <= 0xDFFF): + #ifndef Py_UNICODE_WIDE + if sys.maxunicode < 65536: + p += chr(ch) + p += chr(ch2) + else: + p += chr((((ch & 0x3FF)<<10) | (ch2 & 0x3FF)) + 0x10000) + #endif + continue + + else: + errmsg = "illegal UTF-16 surrogate" + startinpos = q-4 + endinpos = startinpos+2 + unicode_call_errorhandler(errors, 'utf-16', errmsg, s, startinpos, endinpos, True) + + errmsg = "illegal encoding" + startinpos = q-2 + endinpos = startinpos+2 + unicode_call_errorhandler(errors, 'utf-16', errmsg, s, startinpos, endinpos, True) + + return p, q, bo + +# moved out of local scope, especially because it didn't +# have any nested variables. + +def STORECHAR(CH, byteorder): + hi = chr(((CH) >> 8) & 0xff) + lo = chr((CH) & 0xff) + if byteorder == 'little': + return [lo, hi] + else: + return [hi, lo] + +def PyUnicode_EncodeUTF16(s, size, errors, byteorder='little'): + +# /* Offsets from p for storing byte pairs in the right order.
*/ + + + p = [] + bom = sys.byteorder + if (byteorder == 'native'): + + bom = sys.byteorder + p += STORECHAR(0xFEFF, bom) + + if (size == 0): + return "" + + if (byteorder == 'little' ): + bom = 'little' + elif (byteorder == 'big'): + bom = 'big' + + + for c in s: + ch = ord(c) + ch2 = 0 + if (ch >= 0x10000) : + ch2 = 0xDC00 | ((ch-0x10000) & 0x3FF) + ch = 0xD800 | ((ch-0x10000) >> 10) + + p += STORECHAR(ch, bom) + if (ch2): + p += STORECHAR(ch2, bom) + + return p + + +def PyUnicode_DecodeMBCS(s, size, errors): + pass + +def PyUnicode_EncodeMBCS(p, size, errors): + pass + +def unicode_call_errorhandler(errors, encoding, + reason, input, startinpos, endinpos, decode=True): + + errorHandler = lookup_error(errors) + if decode: + exceptionObject = UnicodeDecodeError(encoding, input, startinpos, endinpos, reason) + else: + exceptionObject = UnicodeEncodeError(encoding, input, startinpos, endinpos, reason) + res = errorHandler(exceptionObject) + if isinstance(res, tuple) and isinstance(res[0], str) and isinstance(res[1], int): + newpos = res[1] + if (newpos < 0): + newpos = len(input) + newpos + if newpos < 0 or newpos > len(input): + raise IndexError( "position %d from error handler out of bounds" % newpos) + return res[0], newpos + else: + raise TypeError("encoding error handler must return (unicode, int) tuple, not %s" % repr(res)) + +def PyUnicode_DecodeUTF8(s, size, errors): + return PyUnicode_DecodeUTF8Stateful(s, size, errors, False) + +## /* Map UTF-8 encoded prefix byte to sequence length. zero means +## illegal prefix. see RFC 2279 for details */ +utf8_code_length = [ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 0, 0 +] + +def PyUnicode_DecodeUTF8Stateful(s, size, errors, final): + + consumed = 0 + if (size == 0): + if not final: + consumed = 0 + return '', consumed + p = [] + pos = 0 + while pos < size: + ch = s[pos] + if ch < 0x80: + p += chr(ch) + pos += 1 + continue + + n = utf8_code_length[ch] + startinpos = pos + if (startinpos + n > size): + if not final: + break + else: + errmsg = "unexpected end of data" + endinpos = size + res = unicode_call_errorhandler( + errors, "utf8", errmsg, + s, startinpos, endinpos) + p += res[0] + pos = res[1] + if n == 0: + errmsg = "unexpected code byte" + endinpos = startinpos+1 + res = unicode_call_errorhandler( + errors, "utf8", errmsg, + s, startinpos, endinpos) + p += res[0] + pos = res[1] + elif n == 1: + errmsg = "internal error" + endinpos = startinpos+1 + res = unicode_call_errorhandler( + errors, "utf8", errmsg, + s, startinpos, endinpos) + p += res[0] + pos = res[1] + elif n == 2: + if ((s[pos+1] & 0xc0) != 0x80): + errmsg = "invalid data" + endinpos = startinpos+2 + res = unicode_call_errorhandler( + errors, "utf8", errmsg, + s, startinpos, endinpos) + p += res[0] + pos = res[1] + else: + c = ((s[pos] 
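# --- Editor's sketch (not part of the diff): the contract that
# unicode_call_errorhandler() above enforces. A registered handler receives
# the exception object and returns (replacement, resume_position).
# 'drop-and-mark' is a hypothetical handler name registered just for this
# demo.
import codecs

def drop_and_mark(exc):
    return ('?', exc.end)   # replace the offending input, resume after it

codecs.register_error('drop-and-mark', drop_and_mark)
assert b'ab\xffcd'.decode('ascii', 'drop-and-mark') == 'ab?cd'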
& 0x1f) << 6) + (s[pos+1] & 0x3f) + if c < 0x80: + errmsg = "illegal encoding" + endinpos = startinpos+2 + res = unicode_call_errorhandler( + errors, "utf8", errmsg, + s, startinpos, endinpos) + p += res[0] + pos = res[1] + else: + p += chr(c) + pos += n + #break + elif n == 3: + if ((s[pos+1] & 0xc0) != 0x80 or + (s[pos+2] & 0xc0) != 0x80): + errmsg = "invalid data" + endinpos = startinpos+3 + res = unicode_call_errorhandler( + errors, "utf8", errmsg, + s, startinpos, endinpos) + p += res[0] + pos = res[1] + else: + c = ((s[pos] & 0x0f) << 12) + \ + ((s[pos+1] & 0x3f) << 6) +\ + (s[pos+2] & 0x3f) + +## /* Note: UTF-8 encodings of surrogates are considered +## legal UTF-8 sequences; +## +## XXX For wide builds (UCS-4) we should probably try +## to recombine the surrogates into a single code +## unit. +## */ + if c < 0x0800: + errmsg = "illegal encoding" + endinpos = startinpos+3 + res = unicode_call_errorhandler( + errors, "utf8", errmsg, + s, startinpos, endinpos) + p += res[0] + pos = res[1] + else: + p += chr(c) + pos += n + elif n == 4: +## case 4: + if ((s[pos+1] & 0xc0) != 0x80 or + (s[pos+2] & 0xc0) != 0x80 or + (s[pos+3] & 0xc0) != 0x80): + + errmsg = "invalid data" + startinpos = pos + endinpos = startinpos+4 + res = unicode_call_errorhandler( + errors, "utf8", errmsg, + s, startinpos, endinpos) + p += res[0] + pos = res[1] + else: + c = ((s[pos+0] & 0x7) << 18) + ((s[pos+1] & 0x3f) << 12) +\ + ((s[pos+2] & 0x3f) << 6) + (s[pos+3] & 0x3f) + #/* validate and convert to UTF-16 */ + if ((c < 0x10000) or (c > 0x10ffff)): + #/* minimum value allowed for 4 byte encoding */ + #/* maximum value allowed for UTF-16 */ + + errmsg = "illegal encoding" + startinpos = pos + endinpos = startinpos+4 + res = unicode_call_errorhandler( + errors, "utf8", errmsg, + s, startinpos, endinpos) + p += res[0] + pos = res[1] + else: +#ifdef Py_UNICODE_WIDE + if c < sys.maxunicode: + p += chr(c) + pos += n + else: +## /* compute and append the two surrogates: */ +## /* translate from 10000..10FFFF to 0..FFFF */ + c -= 0x10000 + #/* high surrogate = top 10 bits added to D800 */ + p += chr(0xD800 + (c >> 10)) + #/* low surrogate = bottom 10 bits added to DC00 */ + p += chr(0xDC00 + (c & 0x03FF)) + pos += n + else: +## default: +## /* Other sizes are only needed for UCS-4 */ + errmsg = "unsupported Unicode code range" + startinpos = pos + endinpos = startinpos+n + res = unicode_call_errorhandler( + errors, "utf8", errmsg, + s, startinpos, endinpos) + p += res[0] + pos = res[1] + + #continue + + if not final: + consumed = pos + return p, pos # consumed + +def PyUnicode_EncodeUTF8(s, size, errors): + + #assert(s != None) + assert(size >= 0) + p = bytearray() + i = 0 + while i < size: + ch = s[i] + i += 1 + if (ord(ch) < 0x80): +## /* Encode ASCII */ + p.append(ord(ch)) + elif (ord(ch) < 0x0800) : +## /* Encode Latin-1 */ + p.append(0xc0 | (ord(ch) >> 6)) + p.append(0x80 | (ord(ch) & 0x3f)) + else: +## /* Encode UCS2 Unicode ordinals */ + if (ord(ch) < 0x10000): +## /* Special case: check for high surrogate */ + if (0xD800 <= ord(ch) and ord(ch) <= 0xDBFF and i != size) : + ch2 = s[i] +## /* Check for low surrogate and combine the two to +## form a UCS4 value */ + if (0xDC00 <= ord(ch2) and ord(ch2) <= 0xDFFF) : + ch3 = ((ord(ch) - 0xD800) << 10 | (ord(ch2) - 0xDC00)) + 0x10000 + i += 1 + p += encodeUCS4(ch3) + continue +## /* Fall through: handles isolated high surrogates */ + p.append(0xe0 | (ord(ch) >> 12)) + p.append(0x80 | ((ord(ch) >> 6) & 0x3f)) + p.append(0x80 | (ord(ch) & 0x3f)) + continue + else: + p += 
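# --- Editor's sketch (not part of the diff): the three-byte branch above as
# a standalone function, including the overlong check (a 3-byte sequence
# must decode to at least 0x0800).
def decode_utf8_3byte(b0, b1, b2):
    if (b1 & 0xC0) != 0x80 or (b2 & 0xC0) != 0x80:
        raise ValueError("invalid data")           # bad continuation byte
    c = ((b0 & 0x0F) << 12) | ((b1 & 0x3F) << 6) | (b2 & 0x3F)
    if c < 0x0800:
        raise ValueError("illegal encoding")       # overlong form
    return c

assert decode_utf8_3byte(0xE2, 0x82, 0xAC) == 0x20AC   # euro sign: E2 82 AC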
encodeUCS4(ord(ch)) + return p + +def encodeUCS4(ch): +## /* Encode UCS4 Unicode ordinals */ + p = bytearray() + p.append(0xf0 | (ch >> 18)) + p.append(0x80 | ((ch >> 12) & 0x3f)) + p.append(0x80 | ((ch >> 6) & 0x3f)) + p.append(0x80 | (ch & 0x3f)) + return p + +#/* --- Latin-1 Codec ------------------------------------------------------ */ + +def PyUnicode_DecodeLatin1(s, size, errors): + #/* Latin-1 is equivalent to the first 256 ordinals in Unicode. */ +## if (size == 1): +## return [PyUnicode_FromUnicode(s, 1)] + pos = 0 + p = [] + while (pos < size): + p += chr(s[pos]) + pos += 1 + return p + +def unicode_encode_ucs1(p, size, errors, limit): + + if limit == 256: + reason = "ordinal not in range(256)" + encoding = "latin-1" + else: + reason = "ordinal not in range(128)" + encoding = "ascii" + + if (size == 0): + return [] + res = bytearray() + pos = 0 + while pos < len(p): + #for ch in p: + ch = p[pos] + + if ord(ch) < limit: + res.append(ord(ch)) + pos += 1 + else: + #/* startpos for collecting unencodable chars */ + collstart = pos + collend = pos+1 + while collend < len(p) and ord(p[collend]) >= limit: + collend += 1 + x = unicode_call_errorhandler(errors, encoding, reason, p, collstart, collend, False) + res += str(x[0]) + pos = x[1] + + return res + +def PyUnicode_EncodeLatin1(p, size, errors): + res = unicode_encode_ucs1(p, size, errors, 256) + return res + +hexdigits = [hex(i)[-1] for i in range(16)]+[hex(i)[-1].upper() for i in range(10, 16)] + +def hexescape(s, pos, digits, message, errors): + chr = 0 + p = [] + if (pos+digits>len(s)): + message = "end of string in escape sequence" + x = unicode_call_errorhandler(errors, "unicodeescape", message, s, pos-2, len(s)) + p += x[0] + pos = x[1] + else: + try: + chr = int(s[pos:pos+digits], 16) + except ValueError: + endinpos = pos + while s[endinpos] in hexdigits: + endinpos += 1 + x = unicode_call_errorhandler(errors, "unicodeescape", message, s, pos-2, + endinpos+1) + p += x[0] + pos = x[1] + #/* when we get here, chr is a 32-bit unicode character */ + else: + if chr <= sys.maxunicode: + p += chr(chr) + pos += digits + + elif (chr <= 0x10ffff): + chr -= 0x10000 + p += chr(0xD800 + (chr >> 10)) + p += chr(0xDC00 + (chr & 0x03FF)) + pos += digits + else: + message = "illegal Unicode character" + x = unicode_call_errorhandler(errors, "unicodeescape", message, s, pos-2, + pos+1) + p += x[0] + pos = x[1] + res = p + return res, pos + +def PyUnicode_DecodeUnicodeEscape(s, size, errors): + + if (size == 0): + return '' + + p = [] + pos = 0 + while (pos < size): +## /* Non-escape characters are interpreted as Unicode ordinals */ + if (s[pos] != '\\') : + p += chr(ord(s[pos])) + pos += 1 + continue +## /* \ - Escapes */ + else: + pos += 1 + if pos >= len(s): + errmessage = "\\ at end of string" + unicode_call_errorhandler(errors, "unicodeescape", errmessage, s, pos-1, size) + ch = s[pos] + pos += 1 + ## /* \x escapes */ + if ch == '\\' : p += '\\' + elif ch == '\'': p += '\'' + elif ch == '\"': p += '\"' + elif ch == 'b' : p += '\b' + elif ch == 'f' : p += '\014' #/* FF */ + elif ch == 't' : p += '\t' + elif ch == 'n' : p += '\n' + elif ch == 'r' : p += '\r' + elif ch == 'v': p += '\013' #break; /* VT */ + elif ch == 'a': p += '\007' # break; /* BEL, not classic C */ + elif '0' <= ch <= '7': + x = ord(ch) - ord('0') + if pos < size: + ch = s[pos] + if '0' <= ch <= '7': + pos += 1 + x = (x<<3) + ord(ch) - ord('0') + if pos < size: + ch = s[pos] + if '0' <= ch <= '7': + pos += 1 + x = (x<<3) + ord(ch) - ord('0') + p += chr(x) + ## /* hex 
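# --- Editor's note (not part of the diff): hexescape() above rebinds the
# name `chr` to the decoded code point and then calls chr(chr), which fails
# because the builtin is shadowed. The surrogate-split logic it intends, as
# a standalone sketch:
def codepoint_to_utf16_units(cp):
    if cp <= 0xFFFF:
        return [cp]
    cp -= 0x10000
    return [0xD800 + (cp >> 10), 0xDC00 + (cp & 0x03FF)]

assert codepoint_to_utf16_units(0x41) == [0x41]
assert codepoint_to_utf16_units(0x10FFFF) == [0xDBFF, 0xDFFF]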
escapes */ + ## /* \xXX */ + elif ch == 'x': + digits = 2 + message = "truncated \\xXX escape" + x = hexescape(s, pos, digits, message, errors) + p += x[0] + pos = x[1] + + # /* \uXXXX */ + elif ch == 'u': + digits = 4 + message = "truncated \\uXXXX escape" + x = hexescape(s, pos, digits, message, errors) + p += x[0] + pos = x[1] + + # /* \UXXXXXXXX */ + elif ch == 'U': + digits = 8 + message = "truncated \\UXXXXXXXX escape" + x = hexescape(s, pos, digits, message, errors) + p += x[0] + pos = x[1] +## /* \N{name} */ + elif ch == 'N': + message = "malformed \\N character escape" + #pos += 1 + look = pos + try: + import unicodedata + except ImportError: + message = "\\N escapes not supported (can't load unicodedata module)" + unicode_call_errorhandler(errors, "unicodeescape", message, s, pos-1, size) + if look < size and s[look] == '{': + #/* look for the closing brace */ + while (look < size and s[look] != '}'): + look += 1 + if (look > pos+1 and look < size and s[look] == '}'): + #/* found a name. look it up in the unicode database */ + message = "unknown Unicode character name" + st = s[pos+1:look] + try: + chr = unicodedata.lookup("%s" % st) + except KeyError as e: + x = unicode_call_errorhandler(errors, "unicodeescape", message, s, pos-1, look+1) + else: + x = chr, look + 1 + p += x[0] + pos = x[1] + else: + x = unicode_call_errorhandler(errors, "unicodeescape", message, s, pos-1, look+1) + else: + x = unicode_call_errorhandler(errors, "unicodeescape", message, s, pos-1, look+1) + else: + p += '\\' + p += ch + return p + +def PyUnicode_EncodeRawUnicodeEscape(s, size): + + if (size == 0): + return '' + + p = bytearray() + for ch in s: +# /* Map 32-bit characters to '\Uxxxxxxxx' */ + if (ord(ch) >= 0x10000): + p += '\\' + p += 'U' + p += '%08x' % (ord(ch)) + elif (ord(ch) >= 256) : +# /* Map 16-bit characters to '\uxxxx' */ + p += b'\\u%04x' % (ord(ch)) +# /* Copy everything else as-is */ + else: + p.append(ord(ch)) + + #p += '\0' + return p + +def charmapencode_output(c, mapping): + + rep = mapping[c] + if isinstance(rep, int) or isinstance(rep, int): + if rep < 256: + return chr(rep) + else: + raise TypeError("character mapping must be in range(256)") + elif isinstance(rep, str): + return rep + elif rep == None: + raise KeyError("character maps to ") + else: + raise TypeError("character mapping must return integer, None or str") + +def PyUnicode_EncodeCharmap(p, size, mapping='latin-1', errors='strict'): + +## /* the following variable is used for caching string comparisons +## * -1=not initialized, 0=unknown, 1=strict, 2=replace, +## * 3=ignore, 4=xmlcharrefreplace */ + +# /* Default to Latin-1 */ + if mapping == 'latin-1': + return PyUnicode_EncodeLatin1(p, size, errors) + if (size == 0): + return '' + inpos = 0 + res = [] + while (inpos", p, inpos, inpos+1, False) + try: + res += [charmapencode_output(ord(y), mapping) for y in x[0]] + except KeyError: + raise UnicodeEncodeError("charmap", p, inpos, inpos+1, + "character maps to ") + inpos += 1 + return res + +def PyUnicode_DecodeCharmap(s, size, mapping, errors): + +## /* Default to Latin-1 */ + if (mapping == None): + return PyUnicode_DecodeLatin1(s, size, errors) + + if (size == 0): + return '' + p = [] + inpos = 0 + while (inpos< len(s)): + + #/* Get mapping (char ordinal -> integer, Unicode char or None) */ + ch = s[inpos] + try: + x = mapping[ord(ch)] + if isinstance(x, int): + if x < 65536: + p += chr(x) + else: + raise TypeError("character mapping must be in range(65536)") + elif isinstance(x, str): + p += x + elif not x: + 
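# --- Editor's sketch (not part of the diff): the \N{name} branch above
# resolves names through unicodedata.lookup(), which raises KeyError for an
# unknown name -- exactly the case the error handler path covers.
import unicodedata

assert unicodedata.lookup('EURO SIGN') == '\u20ac'
assert '\N{EURO SIGN}' == '\u20ac'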
raise KeyError + else: + raise TypeError + except KeyError: + x = unicode_call_errorhandler(errors, "charmap", + "character maps to ", s, inpos, inpos+1) + p += x[0] + inpos += 1 + return p + +def PyUnicode_DecodeRawUnicodeEscape(s, size, errors): + + if (size == 0): + return '' + pos = 0 + p = [] + while (pos < len(s)): + ch = s[pos] + #/* Non-escape characters are interpreted as Unicode ordinals */ + if (ch != '\\'): + p += chr(ord(ch)) + pos += 1 + continue + startinpos = pos +## /* \u-escapes are only interpreted iff the number of leading +## backslashes is odd */ + bs = pos + while pos < size: + if (s[pos] != '\\'): + break + p += chr(ord(s[pos])) + pos += 1 + + if (((pos - bs) & 1) == 0 or + pos >= size or + (s[pos] != 'u' and s[pos] != 'U')) : + p += chr(ord(s[pos])) + pos += 1 + continue + + p.pop(-1) + if s[pos] == 'u': + count = 4 + else: + count = 8 + pos += 1 + + #/* \uXXXX with 4 hex digits, \Uxxxxxxxx with 8 */ + x = 0 + try: + x = int(s[pos:pos+count], 16) + except ValueError: + res = unicode_call_errorhandler( + errors, "rawunicodeescape", "truncated \\uXXXX", + s, size, pos, pos+count) + p += res[0] + pos = res[1] + else: + #ifndef Py_UNICODE_WIDE + if sys.maxunicode > 0xffff: + if (x > sys.maxunicode): + res = unicode_call_errorhandler( + errors, "rawunicodeescape", "\\Uxxxxxxxx out of range", + s, size, pos, pos+1) + pos = res[1] + p += res[0] + else: + p += chr(x) + pos += count + else: + if (x > 0x10000): + res = unicode_call_errorhandler( + errors, "rawunicodeescape", "\\Uxxxxxxxx out of range", + s, size, pos, pos+1) + pos = res[1] + p += res[0] + + #endif + else: + p += chr(x) + pos += count + + return p diff --git a/Lib/_collections_abc.py b/Lib/_collections_abc.py index 3158a4f16c..53789e729e 100644 --- a/Lib/_collections_abc.py +++ b/Lib/_collections_abc.py @@ -40,7 +40,7 @@ dict_valueiterator = type(iter({}.values())) dict_itemiterator = type(iter({}.items())) list_iterator = type(iter([])) -# list_reverseiterator = type(iter(reversed([]))) +list_reverseiterator = type(iter(reversed([]))) range_iterator = type(iter(range(0))) longrange_iterator = type(iter(range(1 << 1000))) set_iterator = type(iter(set())) @@ -53,13 +53,14 @@ dict_items = type({}.items()) ## misc ## mappingproxy = type(type.__dict__) -# generator = type((lambda: (yield))()) +generator = type((lambda: (yield))()) ## coroutine ## -# async def _coro(): pass -# _coro = _coro() -# coroutine = type(_coro) -# _coro.close() # Prevent ResourceWarning -# del _coro +async def _coro(): pass +_coro = _coro() +coroutine = type(_coro) +_coro.close() # Prevent ResourceWarning +del _coro +# XXX RustPython TODO: async generators # ## asynchronous generator ## # async def _ag(): yield # _ag = _ag() @@ -96,145 +97,145 @@ def __subclasshook__(cls, C): return NotImplemented -# class Awaitable(metaclass=ABCMeta): +class Awaitable(metaclass=ABCMeta): -# __slots__ = () + __slots__ = () -# @abstractmethod -# def __await__(self): -# yield + @abstractmethod + def __await__(self): + yield -# @classmethod -# def __subclasshook__(cls, C): -# if cls is Awaitable: -# return _check_methods(C, "__await__") -# return NotImplemented + @classmethod + def __subclasshook__(cls, C): + if cls is Awaitable: + return _check_methods(C, "__await__") + return NotImplemented -# class Coroutine(Awaitable): +class Coroutine(Awaitable): -# __slots__ = () + __slots__ = () -# @abstractmethod -# def send(self, value): -# """Send a value into the coroutine. -# Return next yielded value or raise StopIteration. 
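# --- Editor's sketch (not part of the diff): the module above captures
# concrete runtime types from small throwaway objects, and Awaitable's
# __subclasshook__ makes isinstance() structural, with no registration.
import types
from collections.abc import Awaitable

assert type((lambda: (yield))()) is types.GeneratorType

class Waits:
    def __await__(self):
        yield

assert isinstance(Waits(), Awaitable)   # satisfied via __await__ alone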
-# """ -# raise StopIteration + @abstractmethod + def send(self, value): + """Send a value into the coroutine. + Return next yielded value or raise StopIteration. + """ + raise StopIteration -# @abstractmethod -# def throw(self, typ, val=None, tb=None): -# """Raise an exception in the coroutine. -# Return next yielded value or raise StopIteration. -# """ -# if val is None: -# if tb is None: -# raise typ -# val = typ() -# if tb is not None: -# val = val.with_traceback(tb) -# raise val - -# def close(self): -# """Raise GeneratorExit inside coroutine. -# """ -# try: -# self.throw(GeneratorExit) -# except (GeneratorExit, StopIteration): -# pass -# else: -# raise RuntimeError("coroutine ignored GeneratorExit") + @abstractmethod + def throw(self, typ, val=None, tb=None): + """Raise an exception in the coroutine. + Return next yielded value or raise StopIteration. + """ + if val is None: + if tb is None: + raise typ + val = typ() + if tb is not None: + val = val.with_traceback(tb) + raise val -# @classmethod -# def __subclasshook__(cls, C): -# if cls is Coroutine: -# return _check_methods(C, '__await__', 'send', 'throw', 'close') -# return NotImplemented + def close(self): + """Raise GeneratorExit inside coroutine. + """ + try: + self.throw(GeneratorExit) + except (GeneratorExit, StopIteration): + pass + else: + raise RuntimeError("coroutine ignored GeneratorExit") + @classmethod + def __subclasshook__(cls, C): + if cls is Coroutine: + return _check_methods(C, '__await__', 'send', 'throw', 'close') + return NotImplemented -# Coroutine.register(coroutine) +Coroutine.register(coroutine) -# class AsyncIterable(metaclass=ABCMeta): -# __slots__ = () +class AsyncIterable(metaclass=ABCMeta): -# @abstractmethod -# def __aiter__(self): -# return AsyncIterator() + __slots__ = () -# @classmethod -# def __subclasshook__(cls, C): -# if cls is AsyncIterable: -# return _check_methods(C, "__aiter__") -# return NotImplemented + @abstractmethod + def __aiter__(self): + return AsyncIterator() + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncIterable: + return _check_methods(C, "__aiter__") + return NotImplemented -# class AsyncIterator(AsyncIterable): -# __slots__ = () +class AsyncIterator(AsyncIterable): -# @abstractmethod -# async def __anext__(self): -# """Return the next item or raise StopAsyncIteration when exhausted.""" -# raise StopAsyncIteration + __slots__ = () -# def __aiter__(self): -# return self + @abstractmethod + async def __anext__(self): + """Return the next item or raise StopAsyncIteration when exhausted.""" + raise StopAsyncIteration -# @classmethod -# def __subclasshook__(cls, C): -# if cls is AsyncIterator: -# return _check_methods(C, "__anext__", "__aiter__") -# return NotImplemented + def __aiter__(self): + return self + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncIterator: + return _check_methods(C, "__anext__", "__aiter__") + return NotImplemented -# class AsyncGenerator(AsyncIterator): +class AsyncGenerator(AsyncIterator): + + __slots__ = () + + async def __anext__(self): + """Return the next item from the asynchronous generator. + When exhausted, raise StopAsyncIteration. + """ + return await self.asend(None) + + @abstractmethod + async def asend(self, value): + """Send a value into the asynchronous generator. + Return next yielded value or raise StopAsyncIteration. + """ + raise StopAsyncIteration + + @abstractmethod + async def athrow(self, typ, val=None, tb=None): + """Raise an exception in the asynchronous generator. 
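# --- Editor's sketch (not part of the diff): close()/aclose() above are
# default implementations written in terms of throw(): inject GeneratorExit
# and complain if the object keeps running. The same protocol, applied to a
# plain generator:
def close_like(gen):
    try:
        gen.throw(GeneratorExit)
    except (GeneratorExit, StopIteration):
        pass
    else:
        raise RuntimeError("generator ignored GeneratorExit")

def counter():
    yield 1
    yield 2

g = counter()
assert next(g) == 1
close_like(g)   # GeneratorExit propagates out of the first yield: clean exit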
+ Return next yielded value or raise StopAsyncIteration. + """ + if val is None: + if tb is None: + raise typ + val = typ() + if tb is not None: + val = val.with_traceback(tb) + raise val -# __slots__ = () + async def aclose(self): + """Raise GeneratorExit inside coroutine. + """ + try: + await self.athrow(GeneratorExit) + except (GeneratorExit, StopAsyncIteration): + pass + else: + raise RuntimeError("asynchronous generator ignored GeneratorExit") -# async def __anext__(self): -# """Return the next item from the asynchronous generator. -# When exhausted, raise StopAsyncIteration. -# """ -# return await self.asend(None) - -# @abstractmethod -# async def asend(self, value): -# """Send a value into the asynchronous generator. -# Return next yielded value or raise StopAsyncIteration. -# """ -# raise StopAsyncIteration - -# @abstractmethod -# async def athrow(self, typ, val=None, tb=None): -# """Raise an exception in the asynchronous generator. -# Return next yielded value or raise StopAsyncIteration. -# """ -# if val is None: -# if tb is None: -# raise typ -# val = typ() -# if tb is not None: -# val = val.with_traceback(tb) -# raise val - -# async def aclose(self): -# """Raise GeneratorExit inside coroutine. -# """ -# try: -# await self.athrow(GeneratorExit) -# except (GeneratorExit, StopAsyncIteration): -# pass -# else: -# raise RuntimeError("asynchronous generator ignored GeneratorExit") - -# @classmethod -# def __subclasshook__(cls, C): -# if cls is AsyncGenerator: -# return _check_methods(C, '__aiter__', '__anext__', -# 'asend', 'athrow', 'aclose') -# return NotImplemented + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncGenerator: + return _check_methods(C, '__aiter__', '__anext__', + 'asend', 'athrow', 'aclose') + return NotImplemented # AsyncGenerator.register(async_generator) @@ -274,20 +275,20 @@ def __subclasshook__(cls, C): return _check_methods(C, '__iter__', '__next__') return NotImplemented -# Iterator.register(bytes_iterator) -# Iterator.register(bytearray_iterator) +Iterator.register(bytes_iterator) +Iterator.register(bytearray_iterator) # Iterator.register(callable_iterator) -# Iterator.register(dict_keyiterator) -# Iterator.register(dict_valueiterator) -# Iterator.register(dict_itemiterator) -# Iterator.register(list_iterator) -# Iterator.register(list_reverseiterator) -# Iterator.register(range_iterator) -# Iterator.register(longrange_iterator) -# Iterator.register(set_iterator) -# Iterator.register(str_iterator) -# Iterator.register(tuple_iterator) -# Iterator.register(zip_iterator) +Iterator.register(dict_keyiterator) +Iterator.register(dict_valueiterator) +Iterator.register(dict_itemiterator) +Iterator.register(list_iterator) +Iterator.register(list_reverseiterator) +Iterator.register(range_iterator) +Iterator.register(longrange_iterator) +Iterator.register(set_iterator) +Iterator.register(str_iterator) +Iterator.register(tuple_iterator) +Iterator.register(zip_iterator) class Reversible(Iterable): @@ -353,8 +354,7 @@ def __subclasshook__(cls, C): 'send', 'throw', 'close') return NotImplemented -# Generator.register(generator) - +Generator.register(generator) class Sized(metaclass=ABCMeta): diff --git a/Lib/_rp_thread.py b/Lib/_rp_thread.py new file mode 100644 index 0000000000..b843c3ec97 --- /dev/null +++ b/Lib/_rp_thread.py @@ -0,0 +1,7 @@ +import _thread +import _dummy_thread + +for k in _dummy_thread.__all__ + ['_set_sentinel', 'stack_size']: + if k not in _thread.__dict__: + # print('Populating _thread.%s' % k) + setattr(_thread, k, 
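# --- Editor's sketch (not part of the diff): the backfill idiom that
# _rp_thread.py above applies, shown on hypothetical stand-in namespaces
# rather than the real _thread/_dummy_thread modules.
import types

partial = types.SimpleNamespace(allocate_lock=object())
fallback = types.SimpleNamespace(allocate_lock=object(), stack_size=lambda *a: 0)

for name in ('allocate_lock', 'stack_size'):
    if not hasattr(partial, name):
        setattr(partial, name, getattr(fallback, name))

assert partial.stack_size() == 0                            # copied in
assert partial.allocate_lock is not fallback.allocate_lock  # left alone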
getattr(_dummy_thread, k)) diff --git a/Lib/_sitebuiltins.py b/Lib/_sitebuiltins.py index c29cf4bf8f..18c1a15841 100644 --- a/Lib/_sitebuiltins.py +++ b/Lib/_sitebuiltins.py @@ -8,8 +8,14 @@ # Note this means this module should also avoid keep things alive in its # globals. +import os import sys +sys.stdin = sys.__stdin__ = getattr(sys, '__stdin__', False) or os.fdopen(0, "r") +sys.stdout = sys.__stdout__ = getattr(sys, '__stdout__', False) or os.fdopen(1, "w") +sys.stderr = sys.__stderr__ = getattr(sys, '__stderr__', False) or os.fdopen(2, "w") + + class Quitter(object): def __init__(self, name, eof): self.name = name diff --git a/Lib/_sre.py b/Lib/_sre.py new file mode 100644 index 0000000000..183da73ae8 --- /dev/null +++ b/Lib/_sre.py @@ -0,0 +1,1297 @@ +# NOT_RPYTHON +""" +A pure Python reimplementation of the _sre module from CPython 2.4 +Copyright 2005 Nik Haldimann, licensed under the MIT license + +This code is based on material licensed under CNRI's Python 1.6 license and +copyrighted by: Copyright (c) 1997-2001 by Secret Labs AB +""" + +import array, operator, sys +from sre_constants import ATCODES, OPCODES, CHCODES, MAXREPEAT +from sre_constants import SRE_INFO_PREFIX, SRE_INFO_LITERAL +from sre_constants import SRE_FLAG_UNICODE, SRE_FLAG_LOCALE +import sre_constants + + +import sys + +# Identifying as _sre from Python 2.3 or 2.4 +MAGIC = 20140917 + +# In _sre.c this is bytesize of the code word type of the C implementation. +# There it's 2 for normal Python builds and more for wide unicode builds (large +# enough to hold a 32-bit UCS-4 encoded character). Since here in pure Python +# we only see re bytecodes as Python longs, we shouldn't have to care about the +# codesize. But sre_compile will compile some stuff differently depending on the +# codesize (e.g., charsets). +if sys.maxunicode == 65535: + CODESIZE = 2 +else: + CODESIZE = 4 + +copyright = "_sre.py 2.4c Copyright 2005 by Nik Haldimann" + + +def getcodesize(): + return CODESIZE + + +def compile(pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]): + """Compiles (or rather just converts) a pattern descriptor to a SRE_Pattern + object. Actual compilation to opcodes happens in sre_compile.""" + return SRE_Pattern(pattern, flags, code, groups, groupindex, indexgroup) + +def getlower(char_ord, flags): + if (char_ord < 128) or (flags & SRE_FLAG_UNICODE) \ + or (flags & SRE_FLAG_LOCALE and char_ord < 256): + return ord(chr(char_ord).lower()) + else: + return char_ord + + +class SRE_Pattern(object): + + def __init__(self, pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]): + self.pattern = pattern + self.flags = flags + self.groups = groups + self.groupindex = groupindex # Maps group names to group indices + self._indexgroup = indexgroup # Maps indices to group names + self._code = code + + def match(self, string, pos=0, endpos=sys.maxsize): + """If zero or more characters at the beginning of string match this + regular expression, return a corresponding MatchObject instance. Return + None if the string does not match the pattern.""" + state = _State(string, pos, endpos, self.flags) + if state.match(self._code): + return SRE_Match(self, state) + else: + return None + + def fullmatch(self, string): + """If the whole string matches the regular expression pattern, return a + corresponding match object. 
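# --- Editor's sketch (not part of the diff): getlower() above folds case
# only where the active flags make it safe; re's flag constants share the
# sre_constants values, so they can drive the same check.
import re

def getlower_demo(char_ord, flags):
    if (char_ord < 128) or (flags & re.UNICODE) \
            or (flags & re.LOCALE and char_ord < 256):
        return ord(chr(char_ord).lower())
    return char_ord

assert getlower_demo(ord('A'), 0) == ord('a')      # ASCII always folds
assert getlower_demo(0xC0, 0) == 0xC0              # 'À' untouched by default
assert getlower_demo(0xC0, re.UNICODE) == 0xE0     # folds under UNICODE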
Return None if the string does not match the + pattern; note that this is different from a zero-length match.""" + match = self.match(string) + if match and match.start() == 0 and match.end() == len(string): + return match + else: + return None + + def search(self, string, pos=0, endpos=sys.maxsize): + """Scan through string looking for a location where this regular + expression produces a match, and return a corresponding MatchObject + instance. Return None if no position in the string matches the + pattern.""" + state = _State(string, pos, endpos, self.flags) + if state.search(self._code): + return SRE_Match(self, state) + else: + return None + + def findall(self, string, pos=0, endpos=sys.maxsize): + """Return a list of all non-overlapping matches of pattern in string.""" + matchlist = [] + state = _State(string, pos, endpos, self.flags) + while state.start <= state.end: + state.reset() + state.string_position = state.start + if not state.search(self._code): + break + match = SRE_Match(self, state) + if self.groups == 0 or self.groups == 1: + item = match.group(self.groups) + else: + item = match.groups("") + matchlist.append(item) + if state.string_position == state.start: + state.start += 1 + else: + state.start = state.string_position + return matchlist + + def _subx(self, template, string, count=0, subn=False): + filter = template + if not callable(template) and "\\" in template: + # handle non-literal strings ; hand it over to the template compiler + import re + filter = re._subx(self, template) + state = _State(string, 0, sys.maxsize, self.flags) + sublist = [] + + n = last_pos = 0 + while not count or n < count: + state.reset() + state.string_position = state.start + if not state.search(self._code): + break + if last_pos < state.start: + sublist.append(string[last_pos:state.start]) + if not (last_pos == state.start and + last_pos == state.string_position and n > 0): + # the above ignores empty matches on latest position + if callable(filter): + sublist.extend(filter(SRE_Match(self, state))) + else: + sublist.append(filter) + last_pos = state.string_position + n += 1 + if state.string_position == state.start: + state.start += 1 + else: + state.start = state.string_position + + if last_pos < state.end: + sublist.append(string[last_pos:state.end]) + item = "".join(sublist) + if subn: + return item, n + else: + return item + + def sub(self, repl, string, count=0): + """Return the string obtained by replacing the leftmost non-overlapping + occurrences of pattern in string by the replacement repl.""" + return self._subx(repl, string, count, False) + + def subn(self, repl, string, count=0): + """Return the tuple (new_string, number_of_subs_made) found by replacing + the leftmost non-overlapping occurrences of pattern with the replacement + repl.""" + return self._subx(repl, string, count, True) + + def split(self, string, maxsplit=0): + """Split string by the occurrences of pattern.""" + splitlist = [] + state = _State(string, 0, sys.maxsize, self.flags) + n = 0 + last = state.start + while not maxsplit or n < maxsplit: + state.reset() + state.string_position = state.start + if not state.search(self._code): + break + if state.start == state.string_position: # zero-width match + if last == state.end: # or end of string + break + state.start += 1 + continue + splitlist.append(string[last:state.start]) + # add groups (if any) + if self.groups: + match = SRE_Match(self, state) + splitlist.extend(list(match.groups(None))) + n += 1 + last = state.start = state.string_position + 
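# --- Editor's sketch (not part of the diff): the "advance by one after a
# zero-width match" rule that findall()/_subx()/split() above all apply, so
# scanning cannot loop forever; the built-in engine follows the same rule:
import re

assert re.findall(r'a*', 'baa') == ['', 'aa', '']   # empty match, step, retry
assert re.sub(r'x*', '-', 'ab') == '-a-b-'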
splitlist.append(string[last:state.end]) + return splitlist + + def finditer(self, string, pos=0, endpos=sys.maxsize): + """Return a list of all non-overlapping matches of pattern in string.""" + scanner = self.scanner(string, pos, endpos) + return iter(scanner.search, None) + + def scanner(self, string, start=0, end=sys.maxsize): + return SRE_Scanner(self, string, start, end) + + def __copy__(self): + raise TypeError("cannot copy this pattern object") + + def __deepcopy__(self): + raise TypeError("cannot copy this pattern object") + + +class SRE_Scanner(object): + """Undocumented scanner interface of sre.""" + + def __init__(self, pattern, string, start, end): + self.pattern = pattern + self._state = _State(string, start, end, self.pattern.flags) + + def _match_search(self, matcher): + state = self._state + state.reset() + state.string_position = state.start + match = None + if matcher(self.pattern._code): + match = SRE_Match(self.pattern, state) + if match is None or state.string_position == state.start: + state.start += 1 + else: + state.start = state.string_position + return match + + def match(self): + return self._match_search(self._state.match) + + def search(self): + return self._match_search(self._state.search) + + +class SRE_Match(object): + + def __init__(self, pattern, state): + self.re = pattern + self.string = state.string + self.pos = state.pos + self.endpos = state.end + self.lastindex = state.lastindex + self.regs = self._create_regs(state) + if pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup): + # The above upper-bound check should not be necessary, as the re + # compiler is supposed to always provide an _indexgroup list long + # enough. But the re.Scanner class seems to screw up something + # there, test_scanner in test_re won't work without upper-bound + # checking. XXX investigate this and report bug to CPython. + self.lastgroup = pattern._indexgroup[self.lastindex] + else: + self.lastgroup = None + + def _create_regs(self, state): + """Creates a tuple of index pairs representing matched groups.""" + regs = [(state.start, state.string_position)] + for group in range(self.re.groups): + mark_index = 2 * group + if mark_index + 1 < len(state.marks) \ + and state.marks[mark_index] is not None \ + and state.marks[mark_index + 1] is not None: + regs.append((state.marks[mark_index], state.marks[mark_index + 1])) + else: + regs.append((-1, -1)) + return tuple(regs) + + def _get_index(self, group): + if isinstance(group, int): + if group >= 0 and group <= self.re.groups: + return group + else: + if group in self.re.groupindex: + return self.re.groupindex[group] + raise IndexError("no such group") + + def _get_slice(self, group, default): + group_indices = self.regs[group] + if group_indices[0] >= 0: + return self.string[group_indices[0]:group_indices[1]] + else: + return default + + def start(self, group=0): + """Returns the indices of the start of the substring matched by group; + group defaults to zero (meaning the whole matched substring). Returns -1 + if group exists but did not contribute to the match.""" + return self.regs[self._get_index(group)][0] + + def end(self, group=0): + """Returns the indices of the end of the substring matched by group; + group defaults to zero (meaning the whole matched substring). 
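# --- Editor's sketch (not part of the diff): finditer() above builds its
# iterator with the two-argument iter(callable, sentinel) form: keep calling
# scanner.search until it returns None.
from collections import deque

calls = deque([3, 5, None, 7])
assert list(iter(calls.popleft, None)) == [3, 5]   # stops at the sentinel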
Returns -1 + if group exists but did not contribute to the match.""" + return self.regs[self._get_index(group)][1] + + def span(self, group=0): + """Returns the 2-tuple (m.start(group), m.end(group)).""" + return self.start(group), self.end(group) + + def expand(self, template): + """Return the string obtained by doing backslash substitution and + resolving group references on template.""" + import sre + return sre._expand(self.re, self, template) + + def groups(self, default=None): + """Returns a tuple containing all the subgroups of the match. The + default argument is used for groups that did not participate in the + match (defaults to None).""" + groups = [] + for indices in self.regs[1:]: + if indices[0] >= 0: + groups.append(self.string[indices[0]:indices[1]]) + else: + groups.append(default) + return tuple(groups) + + def groupdict(self, default=None): + """Return a dictionary containing all the named subgroups of the match. + The default argument is used for groups that did not participate in the + match (defaults to None).""" + groupdict = {} + for key, value in list(self.re.groupindex.items()): + groupdict[key] = self._get_slice(value, default) + return groupdict + + def group(self, *args): + """Returns one or more subgroups of the match. Each argument is either a + group index or a group name.""" + if len(args) == 0: + args = (0,) + grouplist = [] + for group in args: + grouplist.append(self._get_slice(self._get_index(group), None)) + if len(grouplist) == 1: + return grouplist[0] + else: + return tuple(grouplist) + + def __copy__(): + raise TypeError("cannot copy this pattern object") + + def __deepcopy__(): + raise TypeError("cannot copy this pattern object") + + +class _State(object): + + def __init__(self, string, start, end, flags): + if isinstance(string, bytes): + string = string.decode() + self.string = string + if start < 0: + start = 0 + if end > len(string): + end = len(string) + self.start = start + self.string_position = self.start + self.end = end + self.pos = start + self.flags = flags + self.reset() + + def reset(self): + self.marks = [] + self.lastindex = -1 + self.marks_stack = [] + self.context_stack = [] + self.repeat = None + + def match(self, pattern_codes): + # Optimization: Check string length. pattern_codes[3] contains the + # minimum length for a string to possibly match. + if pattern_codes[0] == sre_constants.INFO and pattern_codes[3]: + if self.end - self.string_position < pattern_codes[3]: + #_log("reject (got %d chars, need %d)" + # % (self.end - self.string_position, pattern_codes[3])) + return False + + dispatcher = _OpcodeDispatcher() + self.context_stack.append(_MatchContext(self, pattern_codes)) + has_matched = None + while len(self.context_stack) > 0: + context = self.context_stack[-1] + has_matched = dispatcher.match(context) + if has_matched is not None: # don't pop if context isn't done + self.context_stack.pop() + return has_matched + + def search(self, pattern_codes): + flags = 0 + if pattern_codes[0] == sre_constants.INFO: + # optimization info block + # <1=skip> <2=flags> <3=min> <4=max> <5=prefix info> + if pattern_codes[2] & SRE_INFO_PREFIX and pattern_codes[5] > 1: + return self.fast_search(pattern_codes) + flags = pattern_codes[2] + pattern_codes = pattern_codes[pattern_codes[1] + 1:] + + string_position = self.start + if pattern_codes[0] == sre_constants.LITERAL: + # Special case: Pattern starts with a literal character. 
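# --- Editor's sketch (not part of the diff): the regs tuple built by
# _create_regs() above -- slot 0 is the whole match, and a group that did
# not participate gets (-1, -1) -- matches what the builtin engine exposes:
import re

m = re.match(r'(a)(b)?', 'a')
assert m.regs == ((0, 1), (0, 1), (-1, -1))
assert m.group(2) is None and m.span(2) == (-1, -1)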
This is + # used for short prefixes + character = pattern_codes[1] + while True: + while string_position < self.end \ + and ord(self.string[string_position]) != character: + string_position += 1 + if string_position >= self.end: + return False + self.start = string_position + string_position += 1 + self.string_position = string_position + if flags & SRE_INFO_LITERAL: + return True + if self.match(pattern_codes[2:]): + return True + return False + + # General case + while string_position <= self.end: + self.reset() + self.start = self.string_position = string_position + if self.match(pattern_codes): + return True + string_position += 1 + return False + + def fast_search(self, pattern_codes): + """Skips forward in a string as fast as possible using information from + an optimization info block.""" + # pattern starts with a known prefix + # <5=length> <6=skip> <7=prefix data> + flags = pattern_codes[2] + prefix_len = pattern_codes[5] + prefix_skip = pattern_codes[6] # don't really know what this is good for + prefix = pattern_codes[7:7 + prefix_len] + overlap = pattern_codes[7 + prefix_len - 1:pattern_codes[1] + 1] + pattern_codes = pattern_codes[pattern_codes[1] + 1:] + i = 0 + string_position = self.string_position + while string_position < self.end: + while True: + if ord(self.string[string_position]) != prefix[i]: + if i == 0: + break + else: + i = overlap[i] + else: + i += 1 + if i == prefix_len: + # found a potential match + self.start = string_position + 1 - prefix_len + self.string_position = string_position + 1 \ + - prefix_len + prefix_skip + if flags & SRE_INFO_LITERAL: + return True # matched all of pure literal pattern + if self.match(pattern_codes[2 * prefix_skip:]): + return True + i = overlap[i] + break + string_position += 1 + return False + + def set_mark(self, mark_nr, position): + if mark_nr & 1: + # This id marks the end of a group. + self.lastindex = mark_nr // 2 + 1 + if mark_nr >= len(self.marks): + self.marks.extend([None] * (mark_nr - len(self.marks) + 1)) + self.marks[mark_nr] = position + + def get_marks(self, group_index): + marks_index = 2 * group_index + if len(self.marks) > marks_index + 1: + return self.marks[marks_index], self.marks[marks_index + 1] + else: + return None, None + + def marks_push(self): + self.marks_stack.append((self.marks[:], self.lastindex)) + + def marks_pop(self): + self.marks, self.lastindex = self.marks_stack.pop() + + def marks_pop_keep(self): + self.marks, self.lastindex = self.marks_stack[-1] + + def marks_pop_discard(self): + self.marks_stack.pop() + + def lower(self, char_ord): + return getlower(char_ord, self.flags) + + +class _MatchContext(object): + + def __init__(self, state, pattern_codes): + self.state = state + self.pattern_codes = pattern_codes + self.string_position = state.string_position + self.code_position = 0 + self.has_matched = None + + def push_new_context(self, pattern_offset): + """Creates a new child context of this context and pushes it on the + stack. 
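# --- Editor's sketch (not part of the diff): fast_search() above walks a
# precomputed overlap table for the literal prefix so the scan never
# re-reads input; this is the KMP failure function. A minimal builder for
# such a table (sre_compile's exact layout may differ):
def overlap_table(prefix):
    table = [0] * len(prefix)
    k = 0
    for i in range(1, len(prefix)):
        while k > 0 and prefix[i] != prefix[k]:
            k = table[k - 1]
        if prefix[i] == prefix[k]:
            k += 1
        table[i] = k
    return table

assert overlap_table('abab') == [0, 0, 1, 2]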
pattern_offset is the offset off the current code position to + start interpreting from.""" + child_context = _MatchContext(self.state, + self.pattern_codes[self.code_position + pattern_offset:]) + self.state.context_stack.append(child_context) + return child_context + + def peek_char(self, peek=0): + return self.state.string[self.string_position + peek] + + def skip_char(self, skip_count): + self.string_position += skip_count + + def remaining_chars(self): + return self.state.end - self.string_position + + def peek_code(self, peek=0): + return self.pattern_codes[self.code_position + peek] + + def skip_code(self, skip_count): + self.code_position += skip_count + + def remaining_codes(self): + return len(self.pattern_codes) - self.code_position + + def at_beginning(self): + return self.string_position == 0 + + def at_end(self): + return self.string_position == self.state.end + + def at_linebreak(self): + return not self.at_end() and _is_linebreak(self.peek_char()) + + def at_boundary(self, word_checker): + if self.at_beginning() and self.at_end(): + return False + that = not self.at_beginning() and word_checker(self.peek_char(-1)) + this = not self.at_end() and word_checker(self.peek_char()) + return this != that + + +class _RepeatContext(_MatchContext): + + def __init__(self, context): + _MatchContext.__init__(self, context.state, + context.pattern_codes[context.code_position:]) + self.count = -1 + self.previous = context.state.repeat + self.last_position = None + + +class _Dispatcher(object): + + DISPATCH_TABLE = None + + def dispatch(self, code, context): + method = self.DISPATCH_TABLE.get(code, self.__class__.unknown) + return method(self, context) + + def unknown(self, code, ctx): + raise NotImplementedError() + + @classmethod + def build_dispatch_table(cls, codes, method_prefix): + if cls.DISPATCH_TABLE is not None: + return + table = {} + for code in codes: + code_name = code.name.lower() + if hasattr(cls, "%s%s" % (method_prefix, code_name)): + table[code] = getattr(cls, "%s%s" % (method_prefix, code_name)) + cls.DISPATCH_TABLE = table + + +class _OpcodeDispatcher(_Dispatcher): + + def __init__(self): + self.executing_contexts = {} + self.at_dispatcher = _AtcodeDispatcher() + self.ch_dispatcher = _ChcodeDispatcher() + self.set_dispatcher = _CharsetDispatcher() + + def match(self, context): + """Returns True if the current context matches, False if it doesn't and + None if matching is not finished, ie must be resumed after child + contexts have been matched.""" + while context.remaining_codes() > 0 and context.has_matched is None: + opcode = context.peek_code() + if not self.dispatch(opcode, context): + return None + if context.has_matched is None: + context.has_matched = False + return context.has_matched + + def dispatch(self, opcode, context): + """Dispatches a context on a given opcode. 
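# --- Editor's sketch (not part of the diff): the reflective table-building
# trick of _Dispatcher.build_dispatch_table() above, reduced to a toy opcode
# set (Op and Demo are invented for the demo):
import enum

class Op(enum.IntEnum):
    LITERAL = 0
    ANY = 1
    JUMP = 2

class Demo:
    def op_literal(self, ctx): return 'literal'
    def op_any(self, ctx): return 'any'
    def unknown(self, ctx): return 'unknown'   # fallback, as above

table = {
    code: getattr(Demo, 'op_' + code.name.lower())
    for code in Op
    if hasattr(Demo, 'op_' + code.name.lower())
}

d = Demo()
assert table.get(Op.ANY, Demo.unknown)(d, None) == 'any'
assert table.get(Op.JUMP, Demo.unknown)(d, None) == 'unknown'  # no op_jump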
Returns True if the context + is done matching, False if it must be resumed when next encountered.""" + if id(context) in self.executing_contexts: + generator = self.executing_contexts[id(context)] + del self.executing_contexts[id(context)] + has_finished = next(generator) + else: + method = self.DISPATCH_TABLE.get(opcode, _OpcodeDispatcher.unknown) + has_finished = method(self, context) + if hasattr(has_finished, "__next__"): # avoid using the types module + generator = has_finished + has_finished = next(generator) + if not has_finished: + self.executing_contexts[id(context)] = generator + return has_finished + + def op_success(self, ctx): + # end of pattern + #self._log(ctx, "SUCCESS") + ctx.state.string_position = ctx.string_position + ctx.has_matched = True + return True + + def op_failure(self, ctx): + # immediate failure + #self._log(ctx, "FAILURE") + ctx.has_matched = False + return True + + def general_op_literal(self, ctx, compare, decorate=lambda x: x): + if ctx.at_end() or not compare(decorate(ord(ctx.peek_char())), + decorate(ctx.peek_code(1))): + ctx.has_matched = False + ctx.skip_code(2) + ctx.skip_char(1) + + def op_literal(self, ctx): + # match literal string + # + #self._log(ctx, "LITERAL", ctx.peek_code(1)) + self.general_op_literal(ctx, operator.eq) + return True + + def op_not_literal(self, ctx): + # match anything that is not the given literal character + # + #self._log(ctx, "NOT_LITERAL", ctx.peek_code(1)) + self.general_op_literal(ctx, operator.ne) + return True + + def op_literal_ignore(self, ctx): + # match literal regardless of case + # + #self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1)) + self.general_op_literal(ctx, operator.eq, ctx.state.lower) + return True + + def op_not_literal_ignore(self, ctx): + # match literal regardless of case + # + #self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1)) + self.general_op_literal(ctx, operator.ne, ctx.state.lower) + return True + + def op_at(self, ctx): + # match at given position + # + #self._log(ctx, "AT", ctx.peek_code(1)) + if not self.at_dispatcher.dispatch(ctx.peek_code(1), ctx): + ctx.has_matched = False + return True + ctx.skip_code(2) + return True + + def op_category(self, ctx): + # match at given category + # + #self._log(ctx, "CATEGORY", ctx.peek_code(1)) + if ctx.at_end() or not self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx): + ctx.has_matched = False + return True + ctx.skip_code(2) + ctx.skip_char(1) + return True + + def op_any(self, ctx): + # match anything (except a newline) + # + #self._log(ctx, "ANY") + if ctx.at_end() or ctx.at_linebreak(): + ctx.has_matched = False + return True + ctx.skip_code(1) + ctx.skip_char(1) + return True + + def op_any_all(self, ctx): + # match anything + # + #self._log(ctx, "ANY_ALL") + if ctx.at_end(): + ctx.has_matched = False + return True + ctx.skip_code(1) + ctx.skip_char(1) + return True + + def general_op_in(self, ctx, decorate=lambda x: x): + #self._log(ctx, "OP_IN") + if ctx.at_end(): + ctx.has_matched = False + return + skip = ctx.peek_code(1) + ctx.skip_code(2) # set op pointer to the set code + if not self.check_charset(ctx, decorate(ord(ctx.peek_char()))): + ctx.has_matched = False + return + ctx.skip_code(skip - 1) + ctx.skip_char(1) + + def op_in(self, ctx): + # match set member (or non_member) + # + #self._log(ctx, "OP_IN") + self.general_op_in(ctx) + return True + + def op_in_ignore(self, ctx): + # match set member (or non_member), disregarding case of current char + # + #self._log(ctx, "OP_IN_IGNORE") + self.general_op_in(ctx, ctx.state.lower) + 
return True + + def op_jump(self, ctx): + # jump forward + # + #self._log(ctx, "JUMP", ctx.peek_code(1)) + ctx.skip_code(ctx.peek_code(1) + 1) + return True + + # skip info + # + op_info = op_jump + + def op_mark(self, ctx): + # set mark + # + #self._log(ctx, "OP_MARK", ctx.peek_code(1)) + ctx.state.set_mark(ctx.peek_code(1), ctx.string_position) + ctx.skip_code(2) + return True + + def op_branch(self, ctx): + # alternation + # <0=skip> code ... + #self._log(ctx, "BRANCH") + ctx.state.marks_push() + ctx.skip_code(1) + current_branch_length = ctx.peek_code(0) + while current_branch_length: + # The following tries to shortcut branches starting with a + # (unmatched) literal. _sre.c also shortcuts charsets here. + if not (ctx.peek_code(1) == sre_constants.LITERAL and \ + (ctx.at_end() or ctx.peek_code(2) != ord(ctx.peek_char()))): + ctx.state.string_position = ctx.string_position + child_context = ctx.push_new_context(1) + yield False + if child_context.has_matched: + ctx.has_matched = True + yield True + ctx.state.marks_pop_keep() + ctx.skip_code(current_branch_length) + current_branch_length = ctx.peek_code(0) + ctx.state.marks_pop_discard() + ctx.has_matched = False + yield True + + def op_repeat_one(self, ctx): + # match repeated sequence (maximizing). + # this operator only works if the repeated item is exactly one character + # wide, and we're not already collecting backtracking points. + # <1=min> <2=max> item tail + mincount = ctx.peek_code(2) + maxcount = ctx.peek_code(3) + #self._log(ctx, "REPEAT_ONE", mincount, maxcount) + + if ctx.remaining_chars() < mincount: + ctx.has_matched = False + yield True + ctx.state.string_position = ctx.string_position + count = self.count_repetitions(ctx, maxcount) + ctx.skip_char(count) + if count < mincount: + ctx.has_matched = False + yield True + if ctx.peek_code(ctx.peek_code(1) + 1) == sre_constants.SUCCESS: + # tail is empty. we're finished + ctx.state.string_position = ctx.string_position + ctx.has_matched = True + yield True + + ctx.state.marks_push() + if ctx.peek_code(ctx.peek_code(1) + 1) == sre_constants.LITERAL: + # Special case: Tail starts with a literal. Skip positions where + # the rest of the pattern cannot possibly match. 
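# --- Editor's sketch (not part of the diff): op_branch above tries each
# alternative in order and keeps the first one that lets the rest of the
# pattern match, so alternation is ordered, not longest-match:
import re

assert re.match(r'a|ab', 'ab').group() == 'a'
assert re.match(r'ab|a', 'ab').group() == 'ab'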
+ char = ctx.peek_code(ctx.peek_code(1) + 2) + while True: + while count >= mincount and \ + (ctx.at_end() or ord(ctx.peek_char()) != char): + ctx.skip_char(-1) + count -= 1 + if count < mincount: + break + ctx.state.string_position = ctx.string_position + child_context = ctx.push_new_context(ctx.peek_code(1) + 1) + yield False + if child_context.has_matched: + ctx.has_matched = True + yield True + ctx.skip_char(-1) + count -= 1 + ctx.state.marks_pop_keep() + + else: + # General case: backtracking + while count >= mincount: + ctx.state.string_position = ctx.string_position + child_context = ctx.push_new_context(ctx.peek_code(1) + 1) + yield False + if child_context.has_matched: + ctx.has_matched = True + yield True + ctx.skip_char(-1) + count -= 1 + ctx.state.marks_pop_keep() + + ctx.state.marks_pop_discard() + ctx.has_matched = False + yield True + + def op_min_repeat_one(self, ctx): + # match repeated sequence (minimizing) + # <1=min> <2=max> item tail + mincount = ctx.peek_code(2) + maxcount = ctx.peek_code(3) + #self._log(ctx, "MIN_REPEAT_ONE", mincount, maxcount) + + if ctx.remaining_chars() < mincount: + ctx.has_matched = False + yield True + ctx.state.string_position = ctx.string_position + if mincount == 0: + count = 0 + else: + count = self.count_repetitions(ctx, mincount) + if count < mincount: + ctx.has_matched = False + yield True + ctx.skip_char(count) + if ctx.peek_code(ctx.peek_code(1) + 1) == sre_constants.SUCCESS: + # tail is empty. we're finished + ctx.state.string_position = ctx.string_position + ctx.has_matched = True + yield True + + ctx.state.marks_push() + while maxcount == MAXREPEAT or count <= maxcount: + ctx.state.string_position = ctx.string_position + child_context = ctx.push_new_context(ctx.peek_code(1) + 1) + yield False + if child_context.has_matched: + ctx.has_matched = True + yield True + ctx.state.string_position = ctx.string_position + if self.count_repetitions(ctx, 1) == 0: + break + ctx.skip_char(1) + count += 1 + ctx.state.marks_pop_keep() + + ctx.state.marks_pop_discard() + ctx.has_matched = False + yield True + + def op_repeat(self, ctx): + # create repeat context. 
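# --- Editor's sketch (not part of the diff): the behaviour REPEAT_ONE and
# MIN_REPEAT_ONE above implement -- greedy repeats consume everything and
# hand characters back, lazy repeats grow only when the tail fails:
import re

assert re.match(r'a*a', 'aaaa').group() == 'aaaa'   # greedy, one backtrack
assert re.match(r'a*?a', 'aaaa').group() == 'a'     # lazy, minimal growth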
all the hard work is done by the UNTIL + # operator (MAX_UNTIL, MIN_UNTIL) + # <1=min> <2=max> item tail + #self._log(ctx, "REPEAT", ctx.peek_code(2), ctx.peek_code(3)) + repeat = _RepeatContext(ctx) + ctx.state.repeat = repeat + ctx.state.string_position = ctx.string_position + child_context = ctx.push_new_context(ctx.peek_code(1) + 1) + yield False + ctx.state.repeat = repeat.previous + ctx.has_matched = child_context.has_matched + yield True + + def op_max_until(self, ctx): + # maximizing repeat + # <1=min> <2=max> item tail + repeat = ctx.state.repeat + if repeat is None: + raise RuntimeError("Internal re error: MAX_UNTIL without REPEAT.") + mincount = repeat.peek_code(2) + maxcount = repeat.peek_code(3) + ctx.state.string_position = ctx.string_position + count = repeat.count + 1 + #self._log(ctx, "MAX_UNTIL", count) + + if count < mincount: + # not enough matches + repeat.count = count + child_context = repeat.push_new_context(4) + yield False + ctx.has_matched = child_context.has_matched + if not ctx.has_matched: + repeat.count = count - 1 + ctx.state.string_position = ctx.string_position + yield True + + if (count < maxcount or maxcount == MAXREPEAT) \ + and ctx.state.string_position != repeat.last_position: + # we may have enough matches, if we can match another item, do so + repeat.count = count + ctx.state.marks_push() + save_last_position = repeat.last_position # zero-width match protection + repeat.last_position = ctx.state.string_position + child_context = repeat.push_new_context(4) + yield False + repeat.last_position = save_last_position + if child_context.has_matched: + ctx.state.marks_pop_discard() + ctx.has_matched = True + yield True + ctx.state.marks_pop() + repeat.count = count - 1 + ctx.state.string_position = ctx.string_position + + # cannot match more repeated items here. 
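# --- Editor's sketch (not part of the diff): op_max_until above refuses to
# repeat when the position did not advance (the repeat.last_position
# "zero-width match protection"), which is what keeps patterns such as
# (a*)* from spinning forever:
import re

assert re.match(r'(a*)*$', 'aaa').group() == 'aaa'   # terminates, no hang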
make sure the tail matches + ctx.state.repeat = repeat.previous + child_context = ctx.push_new_context(1) + yield False + ctx.has_matched = child_context.has_matched + if not ctx.has_matched: + ctx.state.repeat = repeat + ctx.state.string_position = ctx.string_position + yield True + + def op_min_until(self, ctx): + # minimizing repeat + # <1=min> <2=max> item tail + repeat = ctx.state.repeat + if repeat is None: + raise RuntimeError("Internal re error: MIN_UNTIL without REPEAT.") + mincount = repeat.peek_code(2) + maxcount = repeat.peek_code(3) + ctx.state.string_position = ctx.string_position + count = repeat.count + 1 + #self._log(ctx, "MIN_UNTIL", count) + + if count < mincount: + # not enough matches + repeat.count = count + child_context = repeat.push_new_context(4) + yield False + ctx.has_matched = child_context.has_matched + if not ctx.has_matched: + repeat.count = count - 1 + ctx.state.string_position = ctx.string_position + yield True + + # see if the tail matches + ctx.state.marks_push() + ctx.state.repeat = repeat.previous + child_context = ctx.push_new_context(1) + yield False + if child_context.has_matched: + ctx.has_matched = True + yield True + ctx.state.repeat = repeat + ctx.state.string_position = ctx.string_position + ctx.state.marks_pop() + + # match more until tail matches + if count >= maxcount and maxcount != MAXREPEAT: + ctx.has_matched = False + yield True + repeat.count = count + child_context = repeat.push_new_context(4) + yield False + ctx.has_matched = child_context.has_matched + if not ctx.has_matched: + repeat.count = count - 1 + ctx.state.string_position = ctx.string_position + yield True + + def general_op_groupref(self, ctx, decorate=lambda x: x): + group_start, group_end = ctx.state.get_marks(ctx.peek_code(1)) + if group_start is None or group_end is None or group_end < group_start: + ctx.has_matched = False + return True + while group_start < group_end: + if ctx.at_end() or decorate(ord(ctx.peek_char())) \ + != decorate(ord(ctx.state.string[group_start])): + ctx.has_matched = False + return True + group_start += 1 + ctx.skip_char(1) + ctx.skip_code(2) + return True + + def op_groupref(self, ctx): + # match backreference + # + #self._log(ctx, "GROUPREF", ctx.peek_code(1)) + return self.general_op_groupref(ctx) + + def op_groupref_ignore(self, ctx): + # match backreference case-insensitive + # + #self._log(ctx, "GROUPREF_IGNORE", ctx.peek_code(1)) + return self.general_op_groupref(ctx, ctx.state.lower) + + def op_groupref_exists(self, ctx): + # codeyes codeno ... 
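# --- Editor's sketch (not part of the diff): general_op_groupref above
# compares the current input against the text a group captured earlier; a
# group that never matched makes the backreference fail:
import re

assert re.match(r'(\w+) \1', 'go go').group() == 'go go'
assert re.match(r'(a)?b\1', 'b') is None   # group 1 unset: \1 cannot match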
+ #self._log(ctx, "GROUPREF_EXISTS", ctx.peek_code(1)) + group_start, group_end = ctx.state.get_marks(ctx.peek_code(1)) + if group_start is None or group_end is None or group_end < group_start: + ctx.skip_code(ctx.peek_code(2) + 1) + else: + ctx.skip_code(3) + return True + + def op_assert(self, ctx): + # assert subpattern + # + #self._log(ctx, "ASSERT", ctx.peek_code(2)) + ctx.state.string_position = ctx.string_position - ctx.peek_code(2) + if ctx.state.string_position < 0: + ctx.has_matched = False + yield True + child_context = ctx.push_new_context(3) + yield False + if child_context.has_matched: + ctx.skip_code(ctx.peek_code(1) + 1) + else: + ctx.has_matched = False + yield True + + def op_assert_not(self, ctx): + # assert not subpattern + # + #self._log(ctx, "ASSERT_NOT", ctx.peek_code(2)) + ctx.state.string_position = ctx.string_position - ctx.peek_code(2) + if ctx.state.string_position >= 0: + child_context = ctx.push_new_context(3) + yield False + if child_context.has_matched: + ctx.has_matched = False + yield True + ctx.skip_code(ctx.peek_code(1) + 1) + yield True + + def unknown(self, ctx): + #self._log(ctx, "UNKNOWN", ctx.peek_code()) + raise RuntimeError("Internal re error. Unknown opcode: %s" % ctx.peek_code()) + + def check_charset(self, ctx, char): + """Checks whether a character matches set of arbitrary length. Assumes + the code pointer is at the first member of the set.""" + self.set_dispatcher.reset(char) + save_position = ctx.code_position + result = None + while result is None: + result = self.set_dispatcher.dispatch(ctx.peek_code(), ctx) + ctx.code_position = save_position + return result + + def count_repetitions(self, ctx, maxcount): + """Returns the number of repetitions of a single item, starting from the + current string position. The code pointer is expected to point to a + REPEAT_ONE operation (with the repeated 4 ahead).""" + count = 0 + real_maxcount = ctx.state.end - ctx.string_position + if maxcount < real_maxcount and maxcount != MAXREPEAT: + real_maxcount = maxcount + # XXX could special case every single character pattern here, as in C. + # This is a general solution, a bit hackisch, but works and should be + # efficient. 
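# --- Editor's sketch (not part of the diff): op_assert/op_assert_not above
# rewind string_position by the subpattern's fixed width (peek_code(2))
# before matching, which is exactly how lookaround is evaluated:
import re

assert re.search(r'(?<=ab)c', 'abc').start() == 2   # lookbehind rewinds 2
assert re.search(r'a(?!b)', 'ab ac').start() == 3   # negative lookahead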
+ code_position = ctx.code_position + string_position = ctx.string_position + ctx.skip_code(4) + reset_position = ctx.code_position + while count < real_maxcount: + # this works because the single character pattern is followed by + # a success opcode + ctx.code_position = reset_position + self.dispatch(ctx.peek_code(), ctx) + if ctx.has_matched is False: # could be None as well + break + count += 1 + ctx.has_matched = None + ctx.code_position = code_position + ctx.string_position = string_position + return count + + def _log(self, context, opname, *args): + arg_string = ("%s " * len(args)) % args + _log("|%s|%s|%s %s" % (context.pattern_codes, + context.string_position, opname, arg_string)) + +_OpcodeDispatcher.build_dispatch_table(OPCODES, "op_") + + +class _CharsetDispatcher(_Dispatcher): + + def __init__(self): + self.ch_dispatcher = _ChcodeDispatcher() + + def reset(self, char): + self.char = char + self.ok = True + + def set_failure(self, ctx): + return not self.ok + def set_literal(self, ctx): + # + if ctx.peek_code(1) == self.char: + return self.ok + else: + ctx.skip_code(2) + def set_category(self, ctx): + # + if self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx): + return self.ok + else: + ctx.skip_code(2) + def set_charset(self, ctx): + # (16 bits per code word) + char_code = self.char + ctx.skip_code(1) # point to beginning of bitmap + if CODESIZE == 2: + if char_code < 256 and ctx.peek_code(char_code >> 4) \ + & (1 << (char_code & 15)): + return self.ok + ctx.skip_code(16) # skip bitmap + else: + if char_code < 256 and ctx.peek_code(char_code >> 5) \ + & (1 << (char_code & 31)): + return self.ok + ctx.skip_code(8) # skip bitmap + def set_range(self, ctx): + # + if ctx.peek_code(1) <= self.char <= ctx.peek_code(2): + return self.ok + ctx.skip_code(3) + def set_negate(self, ctx): + self.ok = not self.ok + ctx.skip_code(1) + def set_bigcharset(self, ctx): + # <256 blockindices> + char_code = self.char + count = ctx.peek_code(1) + ctx.skip_code(2) + if char_code < 65536: + block_index = char_code >> 8 + # NB: there are CODESIZE block indices per bytecode + a = array.array("B") + a.frombytes(array.array(CODESIZE == 2 and "H" or "I", + [ctx.peek_code(block_index // CODESIZE)]).tobytes()) + block = a[block_index % CODESIZE] + ctx.skip_code(256 // CODESIZE) # skip block indices + block_value = ctx.peek_code(block * (32 // CODESIZE) + + ((char_code & 255) >> (CODESIZE == 2 and 4 or 5))) + if block_value & (1 << (char_code & ((8 * CODESIZE) - 1))): + return self.ok + else: + ctx.skip_code(256 // CODESIZE) # skip block indices + ctx.skip_code(count * (32 // CODESIZE)) # skip blocks + def unknown(self, ctx): + return False + +_CharsetDispatcher.build_dispatch_table(OPCODES, "set_") + + +class _AtcodeDispatcher(_Dispatcher): + + def at_beginning(self, ctx): + return ctx.at_beginning() + at_beginning_string = at_beginning + def at_beginning_line(self, ctx): + return ctx.at_beginning() or _is_linebreak(ctx.peek_char(-1)) + def at_end(self, ctx): + return (ctx.remaining_chars() == 1 and ctx.at_linebreak()) or ctx.at_end() + def at_end_line(self, ctx): + return ctx.at_linebreak() or ctx.at_end() + def at_end_string(self, ctx): + return ctx.at_end() + def at_boundary(self, ctx): + return ctx.at_boundary(_is_word) + def at_non_boundary(self, ctx): + return not ctx.at_boundary(_is_word) + def at_loc_boundary(self, ctx): + return ctx.at_boundary(_is_loc_word) + def at_loc_non_boundary(self, ctx): + return not ctx.at_boundary(_is_loc_word) + def at_uni_boundary(self, ctx): + return 
ctx.at_boundary(_is_uni_word) + def at_uni_non_boundary(self, ctx): + return not ctx.at_boundary(_is_uni_word) + def unknown(self, ctx): + return False + +_AtcodeDispatcher.build_dispatch_table(ATCODES, "") + + +class _ChcodeDispatcher(_Dispatcher): + + def category_digit(self, ctx): + return _is_digit(ctx.peek_char()) + def category_not_digit(self, ctx): + return not _is_digit(ctx.peek_char()) + def category_space(self, ctx): + return _is_space(ctx.peek_char()) + def category_not_space(self, ctx): + return not _is_space(ctx.peek_char()) + def category_word(self, ctx): + return _is_word(ctx.peek_char()) + def category_not_word(self, ctx): + return not _is_word(ctx.peek_char()) + def category_linebreak(self, ctx): + return _is_linebreak(ctx.peek_char()) + def category_not_linebreak(self, ctx): + return not _is_linebreak(ctx.peek_char()) + def category_loc_word(self, ctx): + return _is_loc_word(ctx.peek_char()) + def category_loc_not_word(self, ctx): + return not _is_loc_word(ctx.peek_char()) + def category_uni_digit(self, ctx): + return ctx.peek_char().isdigit() + def category_uni_not_digit(self, ctx): + return not ctx.peek_char().isdigit() + def category_uni_space(self, ctx): + return ctx.peek_char().isspace() + def category_uni_not_space(self, ctx): + return not ctx.peek_char().isspace() + def category_uni_word(self, ctx): + return _is_uni_word(ctx.peek_char()) + def category_uni_not_word(self, ctx): + return not _is_uni_word(ctx.peek_char()) + def category_uni_linebreak(self, ctx): + return ord(ctx.peek_char()) in _uni_linebreaks + def category_uni_not_linebreak(self, ctx): + return ord(ctx.peek_char()) not in _uni_linebreaks + def unknown(self, ctx): + return False + +_ChcodeDispatcher.build_dispatch_table(CHCODES, "") + + +_ascii_char_info = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 6, 2, +2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 25, 25, 25, 25, 25, 25, 25, +25, 25, 0, 0, 0, 0, 0, 0, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, +24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, +0, 0, 16, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, +24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, 0, 0, 0 ] + +def _is_digit(char): + code = ord(char) + return code < 128 and _ascii_char_info[code] & 1 + +def _is_space(char): + code = ord(char) + return code < 128 and _ascii_char_info[code] & 2 + +def _is_word(char): + # NB: non-ASCII chars aren't words according to _sre.c + code = ord(char) + return code < 128 and _ascii_char_info[code] & 16 + +def _is_loc_word(char): + return (not (ord(char) & ~255) and char.isalnum()) or char == '_' + +def _is_uni_word(char): + return chr(ord(char)).isalnum() or char == '_' + +def _is_linebreak(char): + return char == "\n" + +# Static list of all unicode codepoints reported by Py_UNICODE_ISLINEBREAK. +_uni_linebreaks = [10, 13, 28, 29, 30, 133, 8232, 8233] + +def _log(message): + if 0: + print(message) diff --git a/Lib/_threading_local.py b/Lib/_threading_local.py new file mode 100644 index 0000000000..76f10229d2 --- /dev/null +++ b/Lib/_threading_local.py @@ -0,0 +1,248 @@ +"""Thread-local objects. + +(Note that this module provides a Python version of the threading.local + class. Depending on the version of Python you're using, there may be a + faster one available. You should always import the `local` class from + `threading`.) + +Thread-local objects support the management of thread-local data. 
+If you have data that you want to be local to a thread, simply create +a thread-local object and use its attributes: + + >>> mydata = local() + >>> mydata.number = 42 + >>> mydata.number + 42 + +You can also access the local-object's dictionary: + + >>> mydata.__dict__ + {'number': 42} + >>> mydata.__dict__.setdefault('widgets', []) + [] + >>> mydata.widgets + [] + +What's important about thread-local objects is that their data are +local to a thread. If we access the data in a different thread: + + >>> log = [] + >>> def f(): + ... items = sorted(mydata.__dict__.items()) + ... log.append(items) + ... mydata.number = 11 + ... log.append(mydata.number) + + >>> import threading + >>> thread = threading.Thread(target=f) + >>> thread.start() + >>> thread.join() + >>> log + [[], 11] + +we get different data. Furthermore, changes made in the other thread +don't affect data seen in this thread: + + >>> mydata.number + 42 + +Of course, values you get from a local object, including a __dict__ +attribute, are for whatever thread was current at the time the +attribute was read. For that reason, you generally don't want to save +these values across threads, as they apply only to the thread they +came from. + +You can create custom local objects by subclassing the local class: + + >>> class MyLocal(local): + ... number = 2 + ... initialized = False + ... def __init__(self, **kw): + ... if self.initialized: + ... raise SystemError('__init__ called too many times') + ... self.initialized = True + ... self.__dict__.update(kw) + ... def squared(self): + ... return self.number ** 2 + +This can be useful to support default values, methods and +initialization. Note that if you define an __init__ method, it will be +called each time the local object is used in a separate thread. This +is necessary to initialize each thread's dictionary. + +Now if we create a local object: + + >>> mydata = MyLocal(color='red') + +Now we have a default number: + + >>> mydata.number + 2 + +an initial color: + + >>> mydata.color + 'red' + >>> del mydata.color + +And a method that operates on the data: + + >>> mydata.squared() + 4 + +As before, we can access the data in a separate thread: + + >>> log = [] + >>> thread = threading.Thread(target=f) + >>> thread.start() + >>> thread.join() + >>> log + [[('color', 'red'), ('initialized', True)], 11] + +without affecting this thread's data: + + >>> mydata.number + 2 + >>> mydata.color + Traceback (most recent call last): + ... + AttributeError: 'MyLocal' object has no attribute 'color' + +Note that subclasses can define slots, but they are not thread +local. They are shared across threads: + + >>> class MyLocal(local): + ... __slots__ = 'number' + + >>> mydata = MyLocal() + >>> mydata.number = 42 + >>> mydata.color = 'red' + +So, the separate thread: + + >>> thread = threading.Thread(target=f) + >>> thread.start() + >>> thread.join() + +affects what we see: + + >>> mydata.number + 11 + +>>> del mydata +""" + +from weakref import ref +from contextlib import contextmanager + +__all__ = ["local"] + +# We need to use objects from the threading module, but the threading +# module may also want to use our `local` class, if support for locals +# isn't compiled in to the `thread` module. This creates potential problems +# with circular imports. For that reason, we don't import `threading` +# until the bottom of this file (a hack sufficient to worm around the +# potential problems). 
Note that all platforms on CPython do have support +# for locals in the `thread` module, and there is no circular import problem +# then, so problems introduced by fiddling the order of imports here won't +# manifest. + +class _localimpl: + """A class managing thread-local dicts""" + __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__' + + def __init__(self): + # The key used in the Thread objects' attribute dicts. + # We keep it a string for speed but make it unlikely to clash with + # a "real" attribute. + self.key = '_threading_local._localimpl.' + str(id(self)) + # { id(Thread) -> (ref(Thread), thread-local dict) } + self.dicts = {} + + def get_dict(self): + """Return the dict for the current thread. Raises KeyError if none + defined.""" + thread = current_thread() + return self.dicts[id(thread)][1] + + def create_dict(self): + """Create a new dict for the current thread, and return it.""" + localdict = {} + key = self.key + thread = current_thread() + idt = id(thread) + def local_deleted(_, key=key): + # When the localimpl is deleted, remove the thread attribute. + thread = wrthread() + if thread is not None: + del thread.__dict__[key] + def thread_deleted(_, idt=idt): + # When the thread is deleted, remove the local dict. + # Note that this is suboptimal if the thread object gets + # caught in a reference loop. We would like to be called + # as soon as the OS-level thread ends instead. + local = wrlocal() + if local is not None: + dct = local.dicts.pop(idt) + wrlocal = ref(self, local_deleted) + wrthread = ref(thread, thread_deleted) + thread.__dict__[key] = wrlocal + self.dicts[idt] = wrthread, localdict + return localdict + + +@contextmanager +def _patch(self): + old = object.__getattribute__(self, '__dict__') + impl = object.__getattribute__(self, '_local__impl') + try: + dct = impl.get_dict() + except KeyError: + dct = impl.create_dict() + args, kw = impl.localargs + self.__init__(*args, **kw) + with impl.locallock: + object.__setattr__(self, '__dict__', dct) + yield + object.__setattr__(self, '__dict__', old) + + +class local: + __slots__ = '_local__impl', '__dict__' + + def __new__(cls, *args, **kw): + if (args or kw) and (cls.__init__ is object.__init__): + raise TypeError("Initialization arguments are not supported") + self = object.__new__(cls) + impl = _localimpl() + impl.localargs = (args, kw) + impl.locallock = RLock() + object.__setattr__(self, '_local__impl', impl) + # We need to create the thread dict in anticipation of + # __init__ being called, to make sure we don't call it + # again ourselves. + impl.create_dict() + return self + + def __getattribute__(self, name): + with _patch(self): + return object.__getattribute__(self, name) + + def __setattr__(self, name, value): + if name == '__dict__': + raise AttributeError( + "%r object attribute '__dict__' is read-only" + % self.__class__.__name__) + with _patch(self): + return object.__setattr__(self, name, value) + + def __delattr__(self, name): + if name == '__dict__': + raise AttributeError( + "%r object attribute '__dict__' is read-only" + % self.__class__.__name__) + with _patch(self): + return object.__delattr__(self, name) + + +from threading import current_thread, RLock diff --git a/Lib/aifc.py b/Lib/aifc.py new file mode 100644 index 0000000000..1916e7ef8e --- /dev/null +++ b/Lib/aifc.py @@ -0,0 +1,951 @@ +"""Stuff to parse AIFF-C and AIFF files. + +Unless explicitly stated otherwise, the description below is true +both for AIFF-C files and AIFF files. 
+
+An AIFF-C file has the following structure.
+
+  +-----------------+
+  | FORM            |
+  +-----------------+
+  |                 |
+  +----+------------+
+  |    | AIFC       |
+  |    +------------+
+  |    |            |
+  |    | .          |
+  |    | .          |
+  |    | .          |
+  +----+------------+
+
+An AIFF file has the string "AIFF" instead of "AIFC".
+
+A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
+big endian order), followed by the data.  The size field does not include
+the size of the 8 byte header.
+
+The following chunk types are recognized.
+
+  FVER
+      <version number of AIFF-C defining document> (AIFF-C only).
+  MARK
+      <# of markers> (2 bytes)
+      list of markers:
+          <marker ID> (2 bytes, must be > 0)
+          <position> (4 bytes)
+          <marker name> ("pstring")
+  COMM
+      <# of channels> (2 bytes)
+      <# of sound frames> (4 bytes)
+      <size of the samples> (2 bytes)
+      <sampling frequency> (10 bytes, IEEE 80-bit extended
+          floating point)
+      in AIFF-C files only:
+      <compression type> (4 bytes)
+      <human-readable version of compression type> ("pstring")
+  SSND
+      <offset> (4 bytes, not used by this program)
+      <blocksize> (4 bytes, not used by this program)
+      <sound data>
+
+A pstring consists of 1 byte length, a string of characters, and 0 or 1
+byte pad to make the total length even.
+
+Usage.
+
+Reading AIFF files:
+  f = aifc.open(file, 'r')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods read(), seek(), and close().
+In some types of audio files, if the setpos() method is not used,
+the seek() method is not necessary.
+
+This returns an instance of a class with the following public methods:
+  getnchannels()  -- returns number of audio channels (1 for
+                     mono, 2 for stereo)
+  getsampwidth()  -- returns sample width in bytes
+  getframerate()  -- returns sampling frequency
+  getnframes()    -- returns number of audio frames
+  getcomptype()   -- returns compression type ('NONE' for AIFF files)
+  getcompname()   -- returns human-readable version of
+                     compression type ('not compressed' for AIFF files)
+  getparams()     -- returns a namedtuple consisting of all of the
+                     above in the above order
+  getmarkers()    -- get the list of marks in the audio file or None
+                     if there are no marks
+  getmark(id)     -- get mark with the specified id (raises an error
+                     if the mark does not exist)
+  readframes(n)   -- returns at most n frames of audio
+  rewind()        -- rewind to the beginning of the audio stream
+  setpos(pos)     -- seek to the specified position
+  tell()          -- return the current position
+  close()         -- close the instance (make it unusable)
+The position returned by tell(), the position given to setpos() and
+the position of marks are all compatible and have nothing to do with
+the actual position in the file.
+The close() method is called automatically when the class instance
+is destroyed.
+
+Writing AIFF files:
+  f = aifc.open(file, 'w')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods write(), tell(), seek(), and
+close().
+ +This returns an instance of a class with the following public methods: + aiff() -- create an AIFF file (AIFF-C default) + aifc() -- create an AIFF-C file + setnchannels(n) -- set the number of channels + setsampwidth(n) -- set the sample width + setframerate(n) -- set the frame rate + setnframes(n) -- set the number of frames + setcomptype(type, name) + -- set the compression type and the + human-readable compression type + setparams(tuple) + -- set all parameters at once + setmark(id, pos, name) + -- add specified mark to the list of marks + tell() -- return current position in output file (useful + in combination with setmark()) + writeframesraw(data) + -- write audio frames without pathing up the + file header + writeframes(data) + -- write audio frames and patch up the file header + close() -- patch up the file header and close the + output file +You should set the parameters before the first writeframesraw or +writeframes. The total number of frames does not need to be set, +but when it is set to the correct value, the header does not have to +be patched up. +It is best to first set all parameters, perhaps possibly the +compression type, and then write audio frames using writeframesraw. +When all frames have been written, either call writeframes(b'') or +close() to patch up the sizes in the header. +Marks can be added anytime. If there are any marks, you must call +close() after all frames have been written. +The close() method is called automatically when the class instance +is destroyed. + +When a file is opened with the extension '.aiff', an AIFF file is +written, otherwise an AIFF-C file is written. This default can be +changed by calling aiff() or aifc() before the first writeframes or +writeframesraw. +""" + +import struct +import builtins +import warnings + +__all__ = ["Error", "open", "openfp"] + +class Error(Exception): + pass + +_AIFC_version = 0xA2805140 # Version 1 of AIFF-C + +def _read_long(file): + try: + return struct.unpack('>l', file.read(4))[0] + except struct.error: + raise EOFError from None + +def _read_ulong(file): + try: + return struct.unpack('>L', file.read(4))[0] + except struct.error: + raise EOFError from None + +def _read_short(file): + try: + return struct.unpack('>h', file.read(2))[0] + except struct.error: + raise EOFError from None + +def _read_ushort(file): + try: + return struct.unpack('>H', file.read(2))[0] + except struct.error: + raise EOFError from None + +def _read_string(file): + length = ord(file.read(1)) + if length == 0: + data = b'' + else: + data = file.read(length) + if length & 1 == 0: + dummy = file.read(1) + return data + +_HUGE_VAL = 1.79769313486231e+308 # See + +def _read_float(f): # 10 bytes + expon = _read_short(f) # 2 bytes + sign = 1 + if expon < 0: + sign = -1 + expon = expon + 0x8000 + himant = _read_ulong(f) # 4 bytes + lomant = _read_ulong(f) # 4 bytes + if expon == himant == lomant == 0: + f = 0.0 + elif expon == 0x7FFF: + f = _HUGE_VAL + else: + expon = expon - 16383 + f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63) + return sign * f + +def _write_short(f, x): + f.write(struct.pack('>h', x)) + +def _write_ushort(f, x): + f.write(struct.pack('>H', x)) + +def _write_long(f, x): + f.write(struct.pack('>l', x)) + +def _write_ulong(f, x): + f.write(struct.pack('>L', x)) + +def _write_string(f, s): + if len(s) > 255: + raise ValueError("string exceeds maximum pstring length") + f.write(struct.pack('B', len(s))) + f.write(s) + if len(s) & 1 == 0: + f.write(b'\x00') + +def _write_float(f, x): + import math + if x < 
0: + sign = 0x8000 + x = x * -1 + else: + sign = 0 + if x == 0: + expon = 0 + himant = 0 + lomant = 0 + else: + fmant, expon = math.frexp(x) + if expon > 16384 or fmant >= 1 or fmant != fmant: # Infinity or NaN + expon = sign|0x7FFF + himant = 0 + lomant = 0 + else: # Finite + expon = expon + 16382 + if expon < 0: # denormalized + fmant = math.ldexp(fmant, expon) + expon = 0 + expon = expon | sign + fmant = math.ldexp(fmant, 32) + fsmant = math.floor(fmant) + himant = int(fsmant) + fmant = math.ldexp(fmant - fsmant, 32) + fsmant = math.floor(fmant) + lomant = int(fsmant) + _write_ushort(f, expon) + _write_ulong(f, himant) + _write_ulong(f, lomant) + +from chunk import Chunk +from collections import namedtuple + +_aifc_params = namedtuple('_aifc_params', + 'nchannels sampwidth framerate nframes comptype compname') + +_aifc_params.nchannels.__doc__ = 'Number of audio channels (1 for mono, 2 for stereo)' +_aifc_params.sampwidth.__doc__ = 'Sample width in bytes' +_aifc_params.framerate.__doc__ = 'Sampling frequency' +_aifc_params.nframes.__doc__ = 'Number of audio frames' +_aifc_params.comptype.__doc__ = 'Compression type ("NONE" for AIFF files)' +_aifc_params.compname.__doc__ = ("""\ +A human-readable version of the compression type +('not compressed' for AIFF files)""") + + +class Aifc_read: + # Variables used in this class: + # + # These variables are available to the user though appropriate + # methods of this class: + # _file -- the open file with methods read(), close(), and seek() + # set through the __init__() method + # _nchannels -- the number of audio channels + # available through the getnchannels() method + # _nframes -- the number of audio frames + # available through the getnframes() method + # _sampwidth -- the number of bytes per audio sample + # available through the getsampwidth() method + # _framerate -- the sampling frequency + # available through the getframerate() method + # _comptype -- the AIFF-C compression type ('NONE' if AIFF) + # available through the getcomptype() method + # _compname -- the human-readable AIFF-C compression type + # available through the getcomptype() method + # _markers -- the marks in the audio file + # available through the getmarkers() and getmark() + # methods + # _soundpos -- the position in the audio stream + # available through the tell() method, set through the + # setpos() method + # + # These variables are used internally only: + # _version -- the AIFF-C version number + # _decomp -- the decompressor from builtin module cl + # _comm_chunk_read -- 1 iff the COMM chunk has been read + # _aifc -- 1 iff reading an AIFF-C file + # _ssnd_seek_needed -- 1 iff positioned correctly in audio + # file for readframes() + # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk + # _framesize -- size of one frame in the file + + _file = None # Set here since __del__ checks it + + def initfp(self, file): + self._version = 0 + self._convert = None + self._markers = [] + self._soundpos = 0 + self._file = file + chunk = Chunk(file) + if chunk.getname() != b'FORM': + raise Error('file does not start with FORM id') + formdata = chunk.read(4) + if formdata == b'AIFF': + self._aifc = 0 + elif formdata == b'AIFC': + self._aifc = 1 + else: + raise Error('not an AIFF or AIFF-C file') + self._comm_chunk_read = 0 + self._ssnd_chunk = None + while 1: + self._ssnd_seek_needed = 1 + try: + chunk = Chunk(self._file) + except EOFError: + break + chunkname = chunk.getname() + if chunkname == b'COMM': + self._read_comm_chunk(chunk) + self._comm_chunk_read = 
1 + elif chunkname == b'SSND': + self._ssnd_chunk = chunk + dummy = chunk.read(8) + self._ssnd_seek_needed = 0 + elif chunkname == b'FVER': + self._version = _read_ulong(chunk) + elif chunkname == b'MARK': + self._readmark(chunk) + chunk.skip() + if not self._comm_chunk_read or not self._ssnd_chunk: + raise Error('COMM chunk and/or SSND chunk missing') + + def __init__(self, f): + if isinstance(f, str): + file_object = builtins.open(f, 'rb') + try: + self.initfp(file_object) + except: + file_object.close() + raise + else: + # assume it is an open file object already + self.initfp(f) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + # + # User visible methods. + # + def getfp(self): + return self._file + + def rewind(self): + self._ssnd_seek_needed = 1 + self._soundpos = 0 + + def close(self): + file = self._file + if file is not None: + self._file = None + file.close() + + def tell(self): + return self._soundpos + + def getnchannels(self): + return self._nchannels + + def getnframes(self): + return self._nframes + + def getsampwidth(self): + return self._sampwidth + + def getframerate(self): + return self._framerate + + def getcomptype(self): + return self._comptype + + def getcompname(self): + return self._compname + +## def getversion(self): +## return self._version + + def getparams(self): + return _aifc_params(self.getnchannels(), self.getsampwidth(), + self.getframerate(), self.getnframes(), + self.getcomptype(), self.getcompname()) + + def getmarkers(self): + if len(self._markers) == 0: + return None + return self._markers + + def getmark(self, id): + for marker in self._markers: + if id == marker[0]: + return marker + raise Error('marker {0!r} does not exist'.format(id)) + + def setpos(self, pos): + if pos < 0 or pos > self._nframes: + raise Error('position not in range') + self._soundpos = pos + self._ssnd_seek_needed = 1 + + def readframes(self, nframes): + if self._ssnd_seek_needed: + self._ssnd_chunk.seek(0) + dummy = self._ssnd_chunk.read(8) + pos = self._soundpos * self._framesize + if pos: + self._ssnd_chunk.seek(pos + 8) + self._ssnd_seek_needed = 0 + if nframes == 0: + return b'' + data = self._ssnd_chunk.read(nframes * self._framesize) + if self._convert and data: + data = self._convert(data) + self._soundpos = self._soundpos + len(data) // (self._nchannels + * self._sampwidth) + return data + + # + # Internal methods. 
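+    # The _*2lin helpers below decompress one chunk of frames to 16-bit
+    # linear PCM via the audioop module; _adpcm2lin threads its decoder
+    # state through self._adpcmstate across successive calls.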
+ # + + def _alaw2lin(self, data): + import audioop + return audioop.alaw2lin(data, 2) + + def _ulaw2lin(self, data): + import audioop + return audioop.ulaw2lin(data, 2) + + def _adpcm2lin(self, data): + import audioop + if not hasattr(self, '_adpcmstate'): + # first time + self._adpcmstate = None + data, self._adpcmstate = audioop.adpcm2lin(data, 2, self._adpcmstate) + return data + + def _read_comm_chunk(self, chunk): + self._nchannels = _read_short(chunk) + self._nframes = _read_long(chunk) + self._sampwidth = (_read_short(chunk) + 7) // 8 + self._framerate = int(_read_float(chunk)) + if self._sampwidth <= 0: + raise Error('bad sample width') + if self._nchannels <= 0: + raise Error('bad # of channels') + self._framesize = self._nchannels * self._sampwidth + if self._aifc: + #DEBUG: SGI's soundeditor produces a bad size :-( + kludge = 0 + if chunk.chunksize == 18: + kludge = 1 + warnings.warn('Warning: bad COMM chunk size') + chunk.chunksize = 23 + #DEBUG end + self._comptype = chunk.read(4) + #DEBUG start + if kludge: + length = ord(chunk.file.read(1)) + if length & 1 == 0: + length = length + 1 + chunk.chunksize = chunk.chunksize + length + chunk.file.seek(-1, 1) + #DEBUG end + self._compname = _read_string(chunk) + if self._comptype != b'NONE': + if self._comptype == b'G722': + self._convert = self._adpcm2lin + elif self._comptype in (b'ulaw', b'ULAW'): + self._convert = self._ulaw2lin + elif self._comptype in (b'alaw', b'ALAW'): + self._convert = self._alaw2lin + else: + raise Error('unsupported compression type') + self._sampwidth = 2 + else: + self._comptype = b'NONE' + self._compname = b'not compressed' + + def _readmark(self, chunk): + nmarkers = _read_short(chunk) + # Some files appear to contain invalid counts. + # Cope with this by testing for EOF. 
+ try: + for i in range(nmarkers): + id = _read_short(chunk) + pos = _read_long(chunk) + name = _read_string(chunk) + if pos or name: + # some files appear to have + # dummy markers consisting of + # a position 0 and name '' + self._markers.append((id, pos, name)) + except EOFError: + w = ('Warning: MARK chunk contains only %s marker%s instead of %s' % + (len(self._markers), '' if len(self._markers) == 1 else 's', + nmarkers)) + warnings.warn(w) + +class Aifc_write: + # Variables used in this class: + # + # These variables are user settable through appropriate methods + # of this class: + # _file -- the open file with methods write(), close(), tell(), seek() + # set through the __init__() method + # _comptype -- the AIFF-C compression type ('NONE' in AIFF) + # set through the setcomptype() or setparams() method + # _compname -- the human-readable AIFF-C compression type + # set through the setcomptype() or setparams() method + # _nchannels -- the number of audio channels + # set through the setnchannels() or setparams() method + # _sampwidth -- the number of bytes per audio sample + # set through the setsampwidth() or setparams() method + # _framerate -- the sampling frequency + # set through the setframerate() or setparams() method + # _nframes -- the number of audio frames written to the header + # set through the setnframes() or setparams() method + # _aifc -- whether we're writing an AIFF-C file or an AIFF file + # set through the aifc() method, reset through the + # aiff() method + # + # These variables are used internally only: + # _version -- the AIFF-C version number + # _comp -- the compressor from builtin module cl + # _nframeswritten -- the number of audio frames actually written + # _datalength -- the size of the audio samples written to the header + # _datawritten -- the size of the audio samples actually written + + _file = None # Set here since __del__ checks it + + def __init__(self, f): + if isinstance(f, str): + file_object = builtins.open(f, 'wb') + try: + self.initfp(file_object) + except: + file_object.close() + raise + + # treat .aiff file extensions as non-compressed audio + if f.endswith('.aiff'): + self._aifc = 0 + else: + # assume it is an open file object already + self.initfp(f) + + def initfp(self, file): + self._file = file + self._version = _AIFC_version + self._comptype = b'NONE' + self._compname = b'not compressed' + self._convert = None + self._nchannels = 0 + self._sampwidth = 0 + self._framerate = 0 + self._nframes = 0 + self._nframeswritten = 0 + self._datawritten = 0 + self._datalength = 0 + self._markers = [] + self._marklength = 0 + self._aifc = 1 # AIFF-C is default + + def __del__(self): + self.close() + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + # + # User visible methods. 
+ # + def aiff(self): + if self._nframeswritten: + raise Error('cannot change parameters after starting to write') + self._aifc = 0 + + def aifc(self): + if self._nframeswritten: + raise Error('cannot change parameters after starting to write') + self._aifc = 1 + + def setnchannels(self, nchannels): + if self._nframeswritten: + raise Error('cannot change parameters after starting to write') + if nchannels < 1: + raise Error('bad # of channels') + self._nchannels = nchannels + + def getnchannels(self): + if not self._nchannels: + raise Error('number of channels not set') + return self._nchannels + + def setsampwidth(self, sampwidth): + if self._nframeswritten: + raise Error('cannot change parameters after starting to write') + if sampwidth < 1 or sampwidth > 4: + raise Error('bad sample width') + self._sampwidth = sampwidth + + def getsampwidth(self): + if not self._sampwidth: + raise Error('sample width not set') + return self._sampwidth + + def setframerate(self, framerate): + if self._nframeswritten: + raise Error('cannot change parameters after starting to write') + if framerate <= 0: + raise Error('bad frame rate') + self._framerate = framerate + + def getframerate(self): + if not self._framerate: + raise Error('frame rate not set') + return self._framerate + + def setnframes(self, nframes): + if self._nframeswritten: + raise Error('cannot change parameters after starting to write') + self._nframes = nframes + + def getnframes(self): + return self._nframeswritten + + def setcomptype(self, comptype, compname): + if self._nframeswritten: + raise Error('cannot change parameters after starting to write') + if comptype not in (b'NONE', b'ulaw', b'ULAW', + b'alaw', b'ALAW', b'G722'): + raise Error('unsupported compression type') + self._comptype = comptype + self._compname = compname + + def getcomptype(self): + return self._comptype + + def getcompname(self): + return self._compname + +## def setversion(self, version): +## if self._nframeswritten: +## raise Error, 'cannot change parameters after starting to write' +## self._version = version + + def setparams(self, params): + nchannels, sampwidth, framerate, nframes, comptype, compname = params + if self._nframeswritten: + raise Error('cannot change parameters after starting to write') + if comptype not in (b'NONE', b'ulaw', b'ULAW', + b'alaw', b'ALAW', b'G722'): + raise Error('unsupported compression type') + self.setnchannels(nchannels) + self.setsampwidth(sampwidth) + self.setframerate(framerate) + self.setnframes(nframes) + self.setcomptype(comptype, compname) + + def getparams(self): + if not self._nchannels or not self._sampwidth or not self._framerate: + raise Error('not all parameters set') + return _aifc_params(self._nchannels, self._sampwidth, self._framerate, + self._nframes, self._comptype, self._compname) + + def setmark(self, id, pos, name): + if id <= 0: + raise Error('marker ID must be > 0') + if pos < 0: + raise Error('marker position must be >= 0') + if not isinstance(name, bytes): + raise Error('marker name must be bytes') + for i in range(len(self._markers)): + if id == self._markers[i][0]: + self._markers[i] = id, pos, name + return + self._markers.append((id, pos, name)) + + def getmark(self, id): + for marker in self._markers: + if id == marker[0]: + return marker + raise Error('marker {0!r} does not exist'.format(id)) + + def getmarkers(self): + if len(self._markers) == 0: + return None + return self._markers + + def tell(self): + return self._nframeswritten + + def writeframesraw(self, data): + if not 
isinstance(data, (bytes, bytearray)): + data = memoryview(data).cast('B') + self._ensure_header_written(len(data)) + nframes = len(data) // (self._sampwidth * self._nchannels) + if self._convert: + data = self._convert(data) + self._file.write(data) + self._nframeswritten = self._nframeswritten + nframes + self._datawritten = self._datawritten + len(data) + + def writeframes(self, data): + self.writeframesraw(data) + if self._nframeswritten != self._nframes or \ + self._datalength != self._datawritten: + self._patchheader() + + def close(self): + if self._file is None: + return + try: + self._ensure_header_written(0) + if self._datawritten & 1: + # quick pad to even size + self._file.write(b'\x00') + self._datawritten = self._datawritten + 1 + self._writemarkers() + if self._nframeswritten != self._nframes or \ + self._datalength != self._datawritten or \ + self._marklength: + self._patchheader() + finally: + # Prevent ref cycles + self._convert = None + f = self._file + self._file = None + f.close() + + # + # Internal methods. + # + + def _lin2alaw(self, data): + import audioop + return audioop.lin2alaw(data, 2) + + def _lin2ulaw(self, data): + import audioop + return audioop.lin2ulaw(data, 2) + + def _lin2adpcm(self, data): + import audioop + if not hasattr(self, '_adpcmstate'): + self._adpcmstate = None + data, self._adpcmstate = audioop.lin2adpcm(data, 2, self._adpcmstate) + return data + + def _ensure_header_written(self, datasize): + if not self._nframeswritten: + if self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'): + if not self._sampwidth: + self._sampwidth = 2 + if self._sampwidth != 2: + raise Error('sample width must be 2 when compressing ' + 'with ulaw/ULAW, alaw/ALAW or G7.22 (ADPCM)') + if not self._nchannels: + raise Error('# channels not specified') + if not self._sampwidth: + raise Error('sample width not specified') + if not self._framerate: + raise Error('sampling rate not specified') + self._write_header(datasize) + + def _init_compression(self): + if self._comptype == b'G722': + self._convert = self._lin2adpcm + elif self._comptype in (b'ulaw', b'ULAW'): + self._convert = self._lin2ulaw + elif self._comptype in (b'alaw', b'ALAW'): + self._convert = self._lin2alaw + + def _write_header(self, initlength): + if self._aifc and self._comptype != b'NONE': + self._init_compression() + self._file.write(b'FORM') + if not self._nframes: + self._nframes = initlength // (self._nchannels * self._sampwidth) + self._datalength = self._nframes * self._nchannels * self._sampwidth + if self._datalength & 1: + self._datalength = self._datalength + 1 + if self._aifc: + if self._comptype in (b'ulaw', b'ULAW', b'alaw', b'ALAW'): + self._datalength = self._datalength // 2 + if self._datalength & 1: + self._datalength = self._datalength + 1 + elif self._comptype == b'G722': + self._datalength = (self._datalength + 3) // 4 + if self._datalength & 1: + self._datalength = self._datalength + 1 + try: + self._form_length_pos = self._file.tell() + except (AttributeError, OSError): + self._form_length_pos = None + commlength = self._write_form_length(self._datalength) + if self._aifc: + self._file.write(b'AIFC') + self._file.write(b'FVER') + _write_ulong(self._file, 4) + _write_ulong(self._file, self._version) + else: + self._file.write(b'AIFF') + self._file.write(b'COMM') + _write_ulong(self._file, commlength) + _write_short(self._file, self._nchannels) + if self._form_length_pos is not None: + self._nframes_pos = self._file.tell() + _write_ulong(self._file, self._nframes) + if 
self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'): + _write_short(self._file, 8) + else: + _write_short(self._file, self._sampwidth * 8) + _write_float(self._file, self._framerate) + if self._aifc: + self._file.write(self._comptype) + _write_string(self._file, self._compname) + self._file.write(b'SSND') + if self._form_length_pos is not None: + self._ssnd_length_pos = self._file.tell() + _write_ulong(self._file, self._datalength + 8) + _write_ulong(self._file, 0) + _write_ulong(self._file, 0) + + def _write_form_length(self, datalength): + if self._aifc: + commlength = 18 + 5 + len(self._compname) + if commlength & 1: + commlength = commlength + 1 + verslength = 12 + else: + commlength = 18 + verslength = 0 + _write_ulong(self._file, 4 + verslength + self._marklength + \ + 8 + commlength + 16 + datalength) + return commlength + + def _patchheader(self): + curpos = self._file.tell() + if self._datawritten & 1: + datalength = self._datawritten + 1 + self._file.write(b'\x00') + else: + datalength = self._datawritten + if datalength == self._datalength and \ + self._nframes == self._nframeswritten and \ + self._marklength == 0: + self._file.seek(curpos, 0) + return + self._file.seek(self._form_length_pos, 0) + dummy = self._write_form_length(datalength) + self._file.seek(self._nframes_pos, 0) + _write_ulong(self._file, self._nframeswritten) + self._file.seek(self._ssnd_length_pos, 0) + _write_ulong(self._file, datalength + 8) + self._file.seek(curpos, 0) + self._nframes = self._nframeswritten + self._datalength = datalength + + def _writemarkers(self): + if len(self._markers) == 0: + return + self._file.write(b'MARK') + length = 2 + for marker in self._markers: + id, pos, name = marker + length = length + len(name) + 1 + 6 + if len(name) & 1 == 0: + length = length + 1 + _write_ulong(self._file, length) + self._marklength = length + 8 + _write_short(self._file, len(self._markers)) + for marker in self._markers: + id, pos, name = marker + _write_short(self._file, id) + _write_ulong(self._file, pos) + _write_string(self._file, name) + +def open(f, mode=None): + if mode is None: + if hasattr(f, 'mode'): + mode = f.mode + else: + mode = 'rb' + if mode in ('r', 'rb'): + return Aifc_read(f) + elif mode in ('w', 'wb'): + return Aifc_write(f) + else: + raise Error("mode must be 'r', 'rb', 'w', or 'wb'") + +def openfp(f, mode=None): + warnings.warn("aifc.openfp is deprecated since Python 3.7. " + "Use aifc.open instead.", DeprecationWarning, stacklevel=2) + return open(f, mode=mode) + +if __name__ == '__main__': + import sys + if not sys.argv[1:]: + sys.argv.append('/usr/demos/data/audio/bach.aiff') + fn = sys.argv[1] + with open(fn, 'r') as f: + print("Reading", fn) + print("nchannels =", f.getnchannels()) + print("nframes =", f.getnframes()) + print("sampwidth =", f.getsampwidth()) + print("framerate =", f.getframerate()) + print("comptype =", f.getcomptype()) + print("compname =", f.getcompname()) + if sys.argv[2:]: + gn = sys.argv[2] + print("Writing", gn) + with open(gn, 'w') as g: + g.setparams(f.getparams()) + while 1: + data = f.readframes(1024) + if not data: + break + g.writeframes(data) + print("Done.") diff --git a/Lib/antigravity.py b/Lib/antigravity.py new file mode 100644 index 0000000000..9b14368037 --- /dev/null +++ b/Lib/antigravity.py @@ -0,0 +1,17 @@ + +import webbrowser +import hashlib + +webbrowser.open("https://xkcd.com/353/") + +def geohash(latitude, longitude, datedow): + '''Compute geohash() using the Munroe algorithm. 
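+
+    The datedow argument is the date joined to that day's Dow Jones
+    opening price (as in the doctest below); its MD5 hex digest supplies
+    the fractional digits grafted onto the latitude and longitude.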
+ + >>> geohash(37.421542, -122.085589, b'2005-05-26-10458.68') + 37.857713 -122.544543 + + ''' + # http://xkcd.com/426/ + h = hashlib.md5(datedow).hexdigest() + p, q = [('%f' % float.fromhex('0.' + x)) for x in (h[:16], h[16:32])] + print('%d%s %d%s' % (latitude, p[1:], longitude, q[1:])) diff --git a/Lib/argparse.py b/Lib/argparse.py index a030749247..0d89682678 100644 --- a/Lib/argparse.py +++ b/Lib/argparse.py @@ -1629,12 +1629,18 @@ def __init__(self, conflict_handler='error', add_help=True, allow_abbrev=True): - - superinit = super(ArgumentParser, self).__init__ - superinit(description=description, - prefix_chars=prefix_chars, - argument_default=argument_default, - conflict_handler=conflict_handler) + _ActionsContainer.__init__(self, + description=description, + prefix_chars=prefix_chars, + argument_default=argument_default, + conflict_handler=conflict_handler) + # FIXME: get multiple inheritance method resolution right so we can use + # what's below instead of the modified version above + # superinit = super(ArgumentParser, self).__init__ + # superinit(description=description, + # prefix_chars=prefix_chars, + # argument_default=argument_default, + # conflict_handler=conflict_handler) # default setting for prog if prog is None: diff --git a/Lib/ast.py b/Lib/ast.py new file mode 100644 index 0000000000..bfe346bba8 --- /dev/null +++ b/Lib/ast.py @@ -0,0 +1,331 @@ +""" + ast + ~~~ + + The `ast` module helps Python applications to process trees of the Python + abstract syntax grammar. The abstract syntax itself might change with + each Python release; this module helps to find out programmatically what + the current grammar looks like and allows modifications of it. + + An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as + a flag to the `compile()` builtin function or by using the `parse()` + function from this module. The result will be a tree of objects whose + classes all inherit from `ast.AST`. + + A modified abstract syntax tree can be compiled into a Python code object + using the built-in `compile()` function. + + Additionally various helper functions are provided that make working with + the trees simpler. The main intention of the helper functions and this + module in general is to provide an easy to use interface for libraries + that work tightly with the python syntax (template engines for example). + + + :copyright: Copyright 2008 by Armin Ronacher. + :license: Python License. +""" +from _ast import * + + +def parse(source, filename='', mode='exec'): + """ + Parse the source into an AST node. + Equivalent to compile(source, filename, mode, PyCF_ONLY_AST). + """ + return compile(source, filename, mode, PyCF_ONLY_AST) + + +def literal_eval(node_or_string): + """ + Safely evaluate an expression node or a string containing a Python + expression. The string or node provided may only consist of the following + Python literal structures: strings, bytes, numbers, tuples, lists, dicts, + sets, booleans, and None. 
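+
+    Binary + and - are also accepted, but only between a real number and
+    a complex literal, so that constants such as 1+2j evaluate.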
+ """ + if isinstance(node_or_string, str): + node_or_string = parse(node_or_string, mode='eval') + if isinstance(node_or_string, Expression): + node_or_string = node_or_string.body + def _convert_num(node): + if isinstance(node, Constant): + if isinstance(node.value, (int, float, complex)): + return node.value + elif isinstance(node, Num): + return node.n + raise ValueError('malformed node or string: ' + repr(node)) + def _convert_signed_num(node): + if isinstance(node, UnaryOp) and isinstance(node.op, (UAdd, USub)): + operand = _convert_num(node.operand) + if isinstance(node.op, UAdd): + return + operand + else: + return - operand + return _convert_num(node) + def _convert(node): + if isinstance(node, Constant): + return node.value + elif isinstance(node, (Str, Bytes)): + return node.s + elif isinstance(node, Num): + return node.n + elif isinstance(node, Tuple): + return tuple(map(_convert, node.elts)) + elif isinstance(node, List): + return list(map(_convert, node.elts)) + elif isinstance(node, Set): + return set(map(_convert, node.elts)) + elif isinstance(node, Dict): + return dict(zip(map(_convert, node.keys), + map(_convert, node.values))) + elif isinstance(node, NameConstant): + return node.value + elif isinstance(node, BinOp) and isinstance(node.op, (Add, Sub)): + left = _convert_signed_num(node.left) + right = _convert_num(node.right) + if isinstance(left, (int, float)) and isinstance(right, complex): + if isinstance(node.op, Add): + return left + right + else: + return left - right + return _convert_signed_num(node) + return _convert(node_or_string) + + +def dump(node, annotate_fields=True, include_attributes=False): + """ + Return a formatted dump of the tree in *node*. This is mainly useful for + debugging purposes. The returned string will show the names and the values + for fields. This makes the code impossible to evaluate, so if evaluation is + wanted *annotate_fields* must be set to False. Attributes such as line + numbers and column offsets are not dumped by default. If this is wanted, + *include_attributes* can be set to True. + """ + def _format(node): + if isinstance(node, AST): + fields = [(a, _format(b)) for a, b in iter_fields(node)] + rv = '%s(%s' % (node.__class__.__name__, ', '.join( + ('%s=%s' % field for field in fields) + if annotate_fields else + (b for a, b in fields) + )) + if include_attributes and node._attributes: + rv += fields and ', ' or ' ' + rv += ', '.join('%s=%s' % (a, _format(getattr(node, a))) + for a in node._attributes) + return rv + ')' + elif isinstance(node, list): + return '[%s]' % ', '.join(_format(x) for x in node) + return repr(node) + if not isinstance(node, AST): + raise TypeError('expected AST, got %r' % node.__class__.__name__) + return _format(node) + + +def copy_location(new_node, old_node): + """ + Copy source location (`lineno` and `col_offset` attributes) from + *old_node* to *new_node* if possible, and return *new_node*. + """ + for attr in 'lineno', 'col_offset': + if attr in old_node._attributes and attr in new_node._attributes \ + and hasattr(old_node, attr): + setattr(new_node, attr, getattr(old_node, attr)) + return new_node + + +def fix_missing_locations(node): + """ + When you compile a node tree with compile(), the compiler expects lineno and + col_offset attributes for every node that supports them. This is rather + tedious to fill in for generated nodes, so this helper adds these attributes + recursively where not already set, by setting them to the values of the + parent node. 
It works recursively starting at *node*. + """ + def _fix(node, lineno, col_offset): + if 'lineno' in node._attributes: + if not hasattr(node, 'lineno'): + node.lineno = lineno + else: + lineno = node.lineno + if 'col_offset' in node._attributes: + if not hasattr(node, 'col_offset'): + node.col_offset = col_offset + else: + col_offset = node.col_offset + for child in iter_child_nodes(node): + _fix(child, lineno, col_offset) + _fix(node, 1, 0) + return node + + +def increment_lineno(node, n=1): + """ + Increment the line number of each node in the tree starting at *node* by *n*. + This is useful to "move code" to a different location in a file. + """ + for child in walk(node): + if 'lineno' in child._attributes: + child.lineno = getattr(child, 'lineno', 0) + n + return node + + +def iter_fields(node): + """ + Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields`` + that is present on *node*. + """ + for field in node._fields: + try: + yield field, getattr(node, field) + except AttributeError: + pass + + +def iter_child_nodes(node): + """ + Yield all direct child nodes of *node*, that is, all fields that are nodes + and all items of fields that are lists of nodes. + """ + for name, field in iter_fields(node): + if isinstance(field, AST): + yield field + elif isinstance(field, list): + for item in field: + if isinstance(item, AST): + yield item + + +def get_docstring(node, clean=True): + """ + Return the docstring for the given node or None if no docstring can + be found. If the node provided does not have docstrings a TypeError + will be raised. + + If *clean* is `True`, all tabs are expanded to spaces and any whitespace + that can be uniformly removed from the second line onwards is removed. + """ + if not isinstance(node, (AsyncFunctionDef, FunctionDef, ClassDef, Module)): + raise TypeError("%r can't have docstrings" % node.__class__.__name__) + if not(node.body and isinstance(node.body[0], Expr)): + return None + node = node.body[0].value + if isinstance(node, Str): + text = node.s + elif isinstance(node, Constant) and isinstance(node.value, str): + text = node.value + else: + return None + if clean: + import inspect + text = inspect.cleandoc(text) + return text + + +def walk(node): + """ + Recursively yield all descendant nodes in the tree starting at *node* + (including *node* itself), in no specified order. This is useful if you + only want to modify nodes in place and don't care about the context. + """ + from collections import deque + todo = deque([node]) + while todo: + node = todo.popleft() + todo.extend(iter_child_nodes(node)) + yield node + + +class NodeVisitor(object): + """ + A node visitor base class that walks the abstract syntax tree and calls a + visitor function for every node found. This function may return a value + which is forwarded by the `visit` method. + + This class is meant to be subclassed, with the subclass adding visitor + methods. + + Per default the visitor functions for the nodes are ``'visit_'`` + + class name of the node. So a `TryFinally` node visit function would + be `visit_TryFinally`. This behavior can be changed by overriding + the `visit` method. If no visitor function exists for a node + (return value `None`) the `generic_visit` visitor is used instead. + + Don't use the `NodeVisitor` if you want to apply changes to nodes during + traversing. For this a special visitor exists (`NodeTransformer`) that + allows modifications. 
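+
+    For example, a visitor that prints every name referenced in a tree::
+
+        class NameLister(NodeVisitor):
+
+            def visit_Name(self, node):
+                print(node.id)
+                self.generic_visit(node)
+
+        NameLister().visit(parse('a = b + c'))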
+ """ + + def visit(self, node): + """Visit a node.""" + method = 'visit_' + node.__class__.__name__ + visitor = getattr(self, method, self.generic_visit) + return visitor(node) + + def generic_visit(self, node): + """Called if no explicit visitor function exists for a node.""" + for field, value in iter_fields(node): + if isinstance(value, list): + for item in value: + if isinstance(item, AST): + self.visit(item) + elif isinstance(value, AST): + self.visit(value) + + +class NodeTransformer(NodeVisitor): + """ + A :class:`NodeVisitor` subclass that walks the abstract syntax tree and + allows modification of nodes. + + The `NodeTransformer` will walk the AST and use the return value of the + visitor methods to replace or remove the old node. If the return value of + the visitor method is ``None``, the node will be removed from its location, + otherwise it is replaced with the return value. The return value may be the + original node in which case no replacement takes place. + + Here is an example transformer that rewrites all occurrences of name lookups + (``foo``) to ``data['foo']``:: + + class RewriteName(NodeTransformer): + + def visit_Name(self, node): + return copy_location(Subscript( + value=Name(id='data', ctx=Load()), + slice=Index(value=Str(s=node.id)), + ctx=node.ctx + ), node) + + Keep in mind that if the node you're operating on has child nodes you must + either transform the child nodes yourself or call the :meth:`generic_visit` + method for the node first. + + For nodes that were part of a collection of statements (that applies to all + statement nodes), the visitor may also return a list of nodes rather than + just a single node. + + Usually you use the transformer like this:: + + node = YourTransformer().visit(node) + """ + + def generic_visit(self, node): + for field, old_value in iter_fields(node): + if isinstance(old_value, list): + new_values = [] + for value in old_value: + if isinstance(value, AST): + value = self.visit(value) + if value is None: + continue + elif not isinstance(value, AST): + new_values.extend(value) + continue + new_values.append(value) + old_value[:] = new_values + elif isinstance(old_value, AST): + new_node = self.visit(old_value) + if new_node is None: + delattr(node, field) + else: + setattr(node, field, new_node) + return node diff --git a/Lib/asyncio/__init__.py b/Lib/asyncio/__init__.py new file mode 100644 index 0000000000..ff69378ba9 --- /dev/null +++ b/Lib/asyncio/__init__.py @@ -0,0 +1,48 @@ +"""The asyncio package, tracking PEP 3156.""" + +# flake8: noqa +import sys + +import selectors +# XXX RustPython TODO: _overlapped +if sys.platform == 'win32' and False: + # Similar thing for _overlapped. + try: + from . import _overlapped + except ImportError: + import _overlapped # Will also be exported. + + +# This relies on each of the submodules having an __all__ variable. 
+from .base_events import * +from .coroutines import * +from .events import * +from .futures import * +from .locks import * +from .protocols import * +from .runners import * +from .queues import * +from .streams import * +from .subprocess import * +from .tasks import * +from .transports import * + +__all__ = (base_events.__all__ + + coroutines.__all__ + + events.__all__ + + futures.__all__ + + locks.__all__ + + protocols.__all__ + + runners.__all__ + + queues.__all__ + + streams.__all__ + + subprocess.__all__ + + tasks.__all__ + + transports.__all__) + +if sys.platform == 'win32': # pragma: no cover + from .windows_events import * + __all__ += windows_events.__all__ +else: + from .unix_events import * # pragma: no cover + __all__ += unix_events.__all__ diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py new file mode 100644 index 0000000000..2df379933c --- /dev/null +++ b/Lib/asyncio/base_events.py @@ -0,0 +1,1468 @@ +"""Base implementation of event loop. + +The event loop can be broken up into a multiplexer (the part +responsible for notifying us of I/O events) and the event loop proper, +which wraps a multiplexer with functionality for scheduling callbacks, +immediately or at a given time in the future. + +Whenever a public API takes a callback, subsequent positional +arguments will be passed to the callback if/when it is called. This +avoids the proliferation of trivial lambdas implementing closures. +Keyword arguments for the callback are not supported; this is a +conscious design decision, leaving the door open for keyword arguments +to modify the meaning of the API call itself. +""" + +import collections +import concurrent.futures +import heapq +import inspect +import itertools +import logging +import os +import socket +import subprocess +import threading +import time +import traceback +import sys +import warnings +import weakref + +from . import compat +from . import coroutines +from . import events +from . import futures +from . import tasks +from .coroutines import coroutine +from .log import logger + + +__all__ = ['BaseEventLoop'] + + +# Minimum number of _scheduled timer handles before cleanup of +# cancelled handles is performed. +_MIN_SCHEDULED_TIMER_HANDLES = 100 + +# Minimum fraction of _scheduled timer handles that are cancelled +# before cleanup of cancelled handles is performed. +_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5 + +# Exceptions which must not call the exception handler in fatal error +# methods (_fatal_error()) +_FATAL_ERROR_IGNORE = (BrokenPipeError, + ConnectionResetError, ConnectionAbortedError) + + +def _format_handle(handle): + cb = handle._callback + if isinstance(getattr(cb, '__self__', None), tasks.Task): + # format the task + return repr(cb.__self__) + else: + return str(handle) + + +def _format_pipe(fd): + if fd == subprocess.PIPE: + return '' + elif fd == subprocess.STDOUT: + return '' + else: + return repr(fd) + + +def _set_reuseport(sock): + if not hasattr(socket, 'SO_REUSEPORT'): + raise ValueError('reuse_port not supported by socket module') + else: + try: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + except OSError: + raise ValueError('reuse_port not supported by socket module, ' + 'SO_REUSEPORT defined but not implemented.') + + +def _is_stream_socket(sock): + # Linux's socket.type is a bitmask that can include extra info + # about socket, therefore we can't do simple + # `sock_type == socket.SOCK_STREAM`. 
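+    # On Linux, for example, sock.type may come back as
+    # SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, so only the
+    # SOCK_STREAM bits are compared.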
+ return (sock.type & socket.SOCK_STREAM) == socket.SOCK_STREAM + + +def _is_dgram_socket(sock): + # Linux's socket.type is a bitmask that can include extra info + # about socket, therefore we can't do simple + # `sock_type == socket.SOCK_DGRAM`. + return (sock.type & socket.SOCK_DGRAM) == socket.SOCK_DGRAM + + +def _ipaddr_info(host, port, family, type, proto): + # Try to skip getaddrinfo if "host" is already an IP. Users might have + # handled name resolution in their own code and pass in resolved IPs. + if not hasattr(socket, 'inet_pton'): + return + + if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \ + host is None: + return None + + if type == socket.SOCK_STREAM: + # Linux only: + # getaddrinfo() can raise when socket.type is a bit mask. + # So if socket.type is a bit mask of SOCK_STREAM, and say + # SOCK_NONBLOCK, we simply return None, which will trigger + # a call to getaddrinfo() letting it process this request. + proto = socket.IPPROTO_TCP + elif type == socket.SOCK_DGRAM: + proto = socket.IPPROTO_UDP + else: + return None + + if port is None: + port = 0 + elif isinstance(port, bytes) and port == b'': + port = 0 + elif isinstance(port, str) and port == '': + port = 0 + else: + # If port's a service name like "http", don't skip getaddrinfo. + try: + port = int(port) + except (TypeError, ValueError): + return None + + if family == socket.AF_UNSPEC: + afs = [socket.AF_INET] + if hasattr(socket, 'AF_INET6'): + afs.append(socket.AF_INET6) + else: + afs = [family] + + if isinstance(host, bytes): + host = host.decode('idna') + if '%' in host: + # Linux's inet_pton doesn't accept an IPv6 zone index after host, + # like '::1%lo0'. + return None + + for af in afs: + try: + socket.inet_pton(af, host) + # The host has already been resolved. + return af, type, proto, '', (host, port) + except OSError: + pass + + # "host" is not an IP address. + return None + + +def _ensure_resolved(address, *, family=0, type=socket.SOCK_STREAM, proto=0, + flags=0, loop): + host, port = address[:2] + info = _ipaddr_info(host, port, family, type, proto) + if info is not None: + # "host" is already a resolved IP. + fut = loop.create_future() + fut.set_result([info]) + return fut + else: + return loop.getaddrinfo(host, port, family=family, type=type, + proto=proto, flags=flags) + + +def _run_until_complete_cb(fut): + exc = fut._exception + if (isinstance(exc, BaseException) + and not isinstance(exc, Exception)): + # Issue #22429: run_forever() already finished, no need to + # stop it. 
+ return + fut._loop.stop() + + +class Server(events.AbstractServer): + + def __init__(self, loop, sockets): + self._loop = loop + self.sockets = sockets + self._active_count = 0 + self._waiters = [] + + def __repr__(self): + return '<%s sockets=%r>' % (self.__class__.__name__, self.sockets) + + def _attach(self): + assert self.sockets is not None + self._active_count += 1 + + def _detach(self): + assert self._active_count > 0 + self._active_count -= 1 + if self._active_count == 0 and self.sockets is None: + self._wakeup() + + def close(self): + sockets = self.sockets + if sockets is None: + return + self.sockets = None + for sock in sockets: + self._loop._stop_serving(sock) + if self._active_count == 0: + self._wakeup() + + def _wakeup(self): + waiters = self._waiters + self._waiters = None + for waiter in waiters: + if not waiter.done(): + waiter.set_result(waiter) + + @coroutine + def wait_closed(self): + if self.sockets is None or self._waiters is None: + return + waiter = self._loop.create_future() + self._waiters.append(waiter) + yield from waiter + + +class BaseEventLoop(events.AbstractEventLoop): + + def __init__(self): + self._timer_cancelled_count = 0 + self._closed = False + self._stopping = False + self._ready = collections.deque() + self._scheduled = [] + self._default_executor = None + self._internal_fds = 0 + # Identifier of the thread running the event loop, or None if the + # event loop is not running + self._thread_id = None + self._clock_resolution = 1e-06 #time.get_clock_info('monotonic').resolution + self._exception_handler = None + self.set_debug((not sys.flags.ignore_environment + and bool(os.environ.get('PYTHONASYNCIODEBUG')))) + # In debug mode, if the execution of a callback or a step of a task + # exceed this duration in seconds, the slow callback/task is logged. + self.slow_callback_duration = 0.1 + self._current_handle = None + self._task_factory = None + self._coroutine_wrapper_set = False + + if hasattr(sys, 'get_asyncgen_hooks'): + # Python >= 3.6 + # A weak set of all asynchronous generators that are + # being iterated by the loop. + self._asyncgens = weakref.WeakSet() + else: + self._asyncgens = None + + # Set to True when `loop.shutdown_asyncgens` is called. + self._asyncgens_shutdown_called = False + + def __repr__(self): + return ('<%s running=%s closed=%s debug=%s>' + % (self.__class__.__name__, self.is_running(), + self.is_closed(), self.get_debug())) + + def create_future(self): + """Create a Future object attached to the loop.""" + return futures.Future(loop=self) + + def create_task(self, coro): + """Schedule a coroutine object. + + Return a task object. + """ + self._check_closed() + if self._task_factory is None: + task = tasks.Task(coro, loop=self) + if task._source_traceback: + del task._source_traceback[-1] + else: + task = self._task_factory(self, coro) + return task + + def set_task_factory(self, factory): + """Set a task factory that will be used by loop.create_task(). + + If factory is None the default task factory will be set. + + If factory is a callable, it should have a signature matching + '(loop, coro)', where 'loop' will be a reference to the active + event loop, 'coro' will be a coroutine object. The callable + must return a Future. 
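+
+        A minimal conforming factory might look like this (the body
+        simply mirrors the default behaviour shown in create_task())::
+
+            def factory(loop, coro):
+                return tasks.Task(coro, loop=loop)
+
+            loop.set_task_factory(factory)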
+ """ + if factory is not None and not callable(factory): + raise TypeError('task factory must be a callable or None') + self._task_factory = factory + + def get_task_factory(self): + """Return a task factory, or None if the default one is in use.""" + return self._task_factory + + def _make_socket_transport(self, sock, protocol, waiter=None, *, + extra=None, server=None): + """Create socket transport.""" + raise NotImplementedError + + def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None, + *, server_side=False, server_hostname=None, + extra=None, server=None): + """Create SSL transport.""" + raise NotImplementedError + + def _make_datagram_transport(self, sock, protocol, + address=None, waiter=None, extra=None): + """Create datagram transport.""" + raise NotImplementedError + + def _make_read_pipe_transport(self, pipe, protocol, waiter=None, + extra=None): + """Create read pipe transport.""" + raise NotImplementedError + + def _make_write_pipe_transport(self, pipe, protocol, waiter=None, + extra=None): + """Create write pipe transport.""" + raise NotImplementedError + + @coroutine + def _make_subprocess_transport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + extra=None, **kwargs): + """Create subprocess transport.""" + raise NotImplementedError + + def _write_to_self(self): + """Write a byte to self-pipe, to wake up the event loop. + + This may be called from a different thread. + + The subclass is responsible for implementing the self-pipe. + """ + raise NotImplementedError + + def _process_events(self, event_list): + """Process selector events.""" + raise NotImplementedError + + def _check_closed(self): + if self._closed: + raise RuntimeError('Event loop is closed') + + def _asyncgen_finalizer_hook(self, agen): + self._asyncgens.discard(agen) + if not self.is_closed(): + self.create_task(agen.aclose()) + # Wake up the loop if the finalizer was called from + # a different thread. + self._write_to_self() + + def _asyncgen_firstiter_hook(self, agen): + if self._asyncgens_shutdown_called: + warnings.warn( + "asynchronous generator {!r} was scheduled after " + "loop.shutdown_asyncgens() call".format(agen), + ResourceWarning, source=self) + + self._asyncgens.add(agen) + + @coroutine + def shutdown_asyncgens(self): + """Shutdown all active asynchronous generators.""" + self._asyncgens_shutdown_called = True + + if self._asyncgens is None or not len(self._asyncgens): + # If Python version is <3.6 or we don't have any asynchronous + # generators alive. 
+ return + + closing_agens = list(self._asyncgens) + self._asyncgens.clear() + + shutdown_coro = tasks.gather( + *[ag.aclose() for ag in closing_agens], + return_exceptions=True, + loop=self) + + results = yield from shutdown_coro + for result, agen in zip(results, closing_agens): + if isinstance(result, Exception): + self.call_exception_handler({ + 'message': 'an error occurred during closing of ' + 'asynchronous generator {!r}'.format(agen), + 'exception': result, + 'asyncgen': agen + }) + + def run_forever(self): + """Run until stop() is called.""" + self._check_closed() + if self.is_running(): + raise RuntimeError('This event loop is already running') + if events._get_running_loop() is not None: + raise RuntimeError( + 'Cannot run the event loop while another loop is running') + self._set_coroutine_wrapper(self._debug) + self._thread_id = threading.get_ident() + if self._asyncgens is not None: + old_agen_hooks = sys.get_asyncgen_hooks() + sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook, + finalizer=self._asyncgen_finalizer_hook) + try: + events._set_running_loop(self) + while True: + self._run_once() + if self._stopping: + break + finally: + self._stopping = False + self._thread_id = None + events._set_running_loop(None) + self._set_coroutine_wrapper(False) + if self._asyncgens is not None: + sys.set_asyncgen_hooks(*old_agen_hooks) + + def run_until_complete(self, future): + """Run until the Future is done. + + If the argument is a coroutine, it is wrapped in a Task. + + WARNING: It would be disastrous to call run_until_complete() + with the same coroutine twice -- it would wrap it in two + different Tasks and that can't be good. + + Return the Future's result, or raise its exception. + """ + self._check_closed() + + new_task = not futures.isfuture(future) + future = tasks.ensure_future(future, loop=self) + if new_task: + # An exception is raised if the future didn't complete, so there + # is no need to log the "destroy pending task" message + future._log_destroy_pending = False + + future.add_done_callback(_run_until_complete_cb) + try: + self.run_forever() + except: + if new_task and future.done() and not future.cancelled(): + # The coroutine raised a BaseException. Consume the exception + # to not log a warning, the caller doesn't have access to the + # local task. + future.exception() + raise + future.remove_done_callback(_run_until_complete_cb) + if not future.done(): + raise RuntimeError('Event loop stopped before Future completed.') + + return future.result() + + def stop(self): + """Stop running the event loop. + + Every callback already scheduled will still run. This simply informs + run_forever to stop looping after a complete iteration. + """ + self._stopping = True + + def close(self): + """Close the event loop. + + This clears the queues and shuts down the executor, + but does not wait for the executor to finish. + + The event loop must not be running. + """ + if self.is_running(): + raise RuntimeError("Cannot close a running event loop") + if self._closed: + return + if self._debug: + logger.debug("Close %r", self) + self._closed = True + self._ready.clear() + self._scheduled.clear() + executor = self._default_executor + if executor is not None: + self._default_executor = None + executor.shutdown(wait=False) + + def is_closed(self): + """Returns True if the event loop was closed.""" + return self._closed + + # On Python 3.3 and older, objects with a destructor part of a reference + # cycle are never destroyed. 
That is no longer the case on Python 3.4,
+    # thanks to PEP 442.
+    if compat.PY34:
+        def __del__(self):
+            if not self.is_closed():
+                warnings.warn("unclosed event loop %r" % self, ResourceWarning,
+                              source=self)
+                if not self.is_running():
+                    self.close()
+
+    def is_running(self):
+        """Returns True if the event loop is running."""
+        return (self._thread_id is not None)
+
+    def time(self):
+        """Return the time according to the event loop's clock.
+
+        This is a float expressed in seconds since an epoch, but the
+        epoch, precision, accuracy and drift are unspecified and may
+        differ per event loop.
+        """
+        return time.monotonic()
+
+    def call_later(self, delay, callback, *args):
+        """Arrange for a callback to be called at a given time.
+
+        Return a Handle: an opaque object with a cancel() method that
+        can be used to cancel the call.
+
+        The delay can be an int or float, expressed in seconds. It is
+        always relative to the current time.
+
+        Each callback will be called exactly once. If two callbacks
+        are scheduled for exactly the same time, it is undefined which
+        will be called first.
+
+        Any positional arguments after the callback will be passed to
+        the callback when it is called.
+        """
+        timer = self.call_at(self.time() + delay, callback, *args)
+        if timer._source_traceback:
+            del timer._source_traceback[-1]
+        return timer
+
+    def call_at(self, when, callback, *args):
+        """Like call_later(), but uses an absolute time.
+
+        Absolute time corresponds to the event loop's time() method.
+        """
+        self._check_closed()
+        if self._debug:
+            self._check_thread()
+            self._check_callback(callback, 'call_at')
+        timer = events.TimerHandle(when, callback, args, self)
+        if timer._source_traceback:
+            del timer._source_traceback[-1]
+        heapq.heappush(self._scheduled, timer)
+        timer._scheduled = True
+        return timer
+
+    def call_soon(self, callback, *args):
+        """Arrange for a callback to be called as soon as possible.
+
+        This operates as a FIFO queue: callbacks are called in the
+        order in which they are registered. Each callback will be
+        called exactly once.
+
+        Any positional arguments after the callback will be passed to
+        the callback when it is called.
+        """
+        self._check_closed()
+        if self._debug:
+            self._check_thread()
+            self._check_callback(callback, 'call_soon')
+        handle = self._call_soon(callback, args)
+        if handle._source_traceback:
+            del handle._source_traceback[-1]
+        return handle
+
+    def _check_callback(self, callback, method):
+        if (coroutines.iscoroutine(callback) or
+                coroutines.iscoroutinefunction(callback)):
+            raise TypeError(
+                "coroutines cannot be used with {}()".format(method))
+        if not callable(callback):
+            raise TypeError(
+                'a callable object was expected by {}(), got {!r}'.format(
+                    method, callback))
+
+    def _call_soon(self, callback, args):
+        handle = events.Handle(callback, args, self)
+        if handle._source_traceback:
+            del handle._source_traceback[-1]
+        self._ready.append(handle)
+        return handle
+
+    def _check_thread(self):
+        """Check that the current thread is the thread running the event loop.
+
+        Non-thread-safe methods of this class make this assumption and will
+        likely behave incorrectly when the assumption is violated.
+
+        Should only be called when (self._debug == True). The caller is
+        responsible for checking this condition for performance reasons.
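+
+        The typical guarded call pattern (used by call_at() and
+        call_soon() above) is therefore::
+
+            if self._debug:
+                self._check_thread()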
+ """ + if self._thread_id is None: + return + thread_id = threading.get_ident() + if thread_id != self._thread_id: + raise RuntimeError( + "Non-thread-safe operation invoked on an event loop other " + "than the current one") + + def call_soon_threadsafe(self, callback, *args): + """Like call_soon(), but thread-safe.""" + self._check_closed() + if self._debug: + self._check_callback(callback, 'call_soon_threadsafe') + handle = self._call_soon(callback, args) + if handle._source_traceback: + del handle._source_traceback[-1] + self._write_to_self() + return handle + + def run_in_executor(self, executor, func, *args): + self._check_closed() + if self._debug: + self._check_callback(func, 'run_in_executor') + if executor is None: + executor = self._default_executor + if executor is None: + executor = concurrent.futures.ThreadPoolExecutor() + self._default_executor = executor + return futures.wrap_future(executor.submit(func, *args), loop=self) + + def set_default_executor(self, executor): + self._default_executor = executor + + def _getaddrinfo_debug(self, host, port, family, type, proto, flags): + msg = ["%s:%r" % (host, port)] + if family: + msg.append('family=%r' % family) + if type: + msg.append('type=%r' % type) + if proto: + msg.append('proto=%r' % proto) + if flags: + msg.append('flags=%r' % flags) + msg = ', '.join(msg) + logger.debug('Get address info %s', msg) + + t0 = self.time() + addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags) + dt = self.time() - t0 + + msg = ('Getting address info %s took %.3f ms: %r' + % (msg, dt * 1e3, addrinfo)) + if dt >= self.slow_callback_duration: + logger.info(msg) + else: + logger.debug(msg) + return addrinfo + + def getaddrinfo(self, host, port, *, + family=0, type=0, proto=0, flags=0): + if self._debug: + return self.run_in_executor(None, self._getaddrinfo_debug, + host, port, family, type, proto, flags) + else: + return self.run_in_executor(None, socket.getaddrinfo, + host, port, family, type, proto, flags) + + def getnameinfo(self, sockaddr, flags=0): + return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags) + + @coroutine + def create_connection(self, protocol_factory, host=None, port=None, *, + ssl=None, family=0, proto=0, flags=0, sock=None, + local_addr=None, server_hostname=None): + """Connect to a TCP server. + + Create a streaming transport connection to a given Internet host and + port: socket family AF_INET or socket.AF_INET6 depending on host (or + family if specified), socket type SOCK_STREAM. protocol_factory must be + a callable returning a protocol instance. + + This method is a coroutine which will try to establish the connection + in the background. When successful, the coroutine returns a + (transport, protocol) pair. + """ + if server_hostname is not None and not ssl: + raise ValueError('server_hostname is only meaningful with ssl') + + if server_hostname is None and ssl: + # Use host as default for server_hostname. It is an error + # if host is empty or not set, e.g. when an + # already-connected socket was passed or when only a port + # is given. To avoid this error, you can pass + # server_hostname='' -- this will bypass the hostname + # check. (This also means that if host is a numeric + # IP/IPv6 address, we will attempt to verify that exact + # address; this will probably fail, but it is possible to + # create a certificate for a specific IP address, so we + # don't judge it here.) 
+ if not host: + raise ValueError('You must set server_hostname ' + 'when using ssl without a host') + server_hostname = host + + if host is not None or port is not None: + if sock is not None: + raise ValueError( + 'host/port and sock can not be specified at the same time') + + f1 = _ensure_resolved((host, port), family=family, + type=socket.SOCK_STREAM, proto=proto, + flags=flags, loop=self) + fs = [f1] + if local_addr is not None: + f2 = _ensure_resolved(local_addr, family=family, + type=socket.SOCK_STREAM, proto=proto, + flags=flags, loop=self) + fs.append(f2) + else: + f2 = None + + yield from tasks.wait(fs, loop=self) + + infos = f1.result() + if not infos: + raise OSError('getaddrinfo() returned empty list') + if f2 is not None: + laddr_infos = f2.result() + if not laddr_infos: + raise OSError('getaddrinfo() returned empty list') + + exceptions = [] + for family, type, proto, cname, address in infos: + try: + sock = socket.socket(family=family, type=type, proto=proto) + sock.setblocking(False) + if f2 is not None: + for _, _, _, _, laddr in laddr_infos: + try: + sock.bind(laddr) + break + except OSError as exc: + exc = OSError( + exc.errno, 'error while ' + 'attempting to bind on address ' + '{!r}: {}'.format( + laddr, exc.strerror.lower())) + exceptions.append(exc) + else: + sock.close() + sock = None + continue + if self._debug: + logger.debug("connect %r to %r", sock, address) + yield from self.sock_connect(sock, address) + except OSError as exc: + if sock is not None: + sock.close() + exceptions.append(exc) + except: + if sock is not None: + sock.close() + raise + else: + break + else: + if len(exceptions) == 1: + raise exceptions[0] + else: + # If they all have the same str(), raise one. + model = str(exceptions[0]) + if all(str(exc) == model for exc in exceptions): + raise exceptions[0] + # Raise a combined exception so the user can see all + # the various error messages. + raise OSError('Multiple exceptions: {}'.format( + ', '.join(str(exc) for exc in exceptions))) + + else: + if sock is None: + raise ValueError( + 'host and port was not specified and no sock specified') + if not _is_stream_socket(sock): + # We allow AF_INET, AF_INET6, AF_UNIX as long as they + # are SOCK_STREAM. + # We support passing AF_UNIX sockets even though we have + # a dedicated API for that: create_unix_connection. + # Disallowing AF_UNIX in this method, breaks backwards + # compatibility. 
+ raise ValueError( + 'A Stream Socket was expected, got {!r}'.format(sock)) + + transport, protocol = yield from self._create_connection_transport( + sock, protocol_factory, ssl, server_hostname) + if self._debug: + # Get the socket from the transport because SSL transport closes + # the old socket and creates a new SSL socket + sock = transport.get_extra_info('socket') + logger.debug("%r connected to %s:%r: (%r, %r)", + sock, host, port, transport, protocol) + return transport, protocol + + @coroutine + def _create_connection_transport(self, sock, protocol_factory, ssl, + server_hostname, server_side=False): + + sock.setblocking(False) + + protocol = protocol_factory() + waiter = self.create_future() + if ssl: + sslcontext = None if isinstance(ssl, bool) else ssl + transport = self._make_ssl_transport( + sock, protocol, sslcontext, waiter, + server_side=server_side, server_hostname=server_hostname) + else: + transport = self._make_socket_transport(sock, protocol, waiter) + + try: + yield from waiter + except: + transport.close() + raise + + return transport, protocol + + @coroutine + def create_datagram_endpoint(self, protocol_factory, + local_addr=None, remote_addr=None, *, + family=0, proto=0, flags=0, + reuse_address=None, reuse_port=None, + allow_broadcast=None, sock=None): + """Create datagram connection.""" + if sock is not None: + if not _is_dgram_socket(sock): + raise ValueError( + 'A UDP Socket was expected, got {!r}'.format(sock)) + if (local_addr or remote_addr or + family or proto or flags or + reuse_address or reuse_port or allow_broadcast): + # show the problematic kwargs in exception msg + opts = dict(local_addr=local_addr, remote_addr=remote_addr, + family=family, proto=proto, flags=flags, + reuse_address=reuse_address, reuse_port=reuse_port, + allow_broadcast=allow_broadcast) + problems = ', '.join( + '{}={}'.format(k, v) for k, v in opts.items() if v) + raise ValueError( + 'socket modifier keyword arguments can not be used ' + 'when sock is specified. 
({})'.format(problems)) + sock.setblocking(False) + r_addr = None + else: + if not (local_addr or remote_addr): + if family == 0: + raise ValueError('unexpected address family') + addr_pairs_info = (((family, proto), (None, None)),) + else: + # join address by (family, protocol) + addr_infos = collections.OrderedDict() + for idx, addr in ((0, local_addr), (1, remote_addr)): + if addr is not None: + assert isinstance(addr, tuple) and len(addr) == 2, ( + '2-tuple is expected') + + infos = yield from _ensure_resolved( + addr, family=family, type=socket.SOCK_DGRAM, + proto=proto, flags=flags, loop=self) + if not infos: + raise OSError('getaddrinfo() returned empty list') + + for fam, _, pro, _, address in infos: + key = (fam, pro) + if key not in addr_infos: + addr_infos[key] = [None, None] + addr_infos[key][idx] = address + + # each addr has to have info for each (family, proto) pair + addr_pairs_info = [ + (key, addr_pair) for key, addr_pair in addr_infos.items() + if not ((local_addr and addr_pair[0] is None) or + (remote_addr and addr_pair[1] is None))] + + if not addr_pairs_info: + raise ValueError('can not get address information') + + exceptions = [] + + if reuse_address is None: + reuse_address = os.name == 'posix' and sys.platform != 'cygwin' + + for ((family, proto), + (local_address, remote_address)) in addr_pairs_info: + sock = None + r_addr = None + try: + sock = socket.socket( + family=family, type=socket.SOCK_DGRAM, proto=proto) + if reuse_address: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + if reuse_port: + _set_reuseport(sock) + if allow_broadcast: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_BROADCAST, 1) + sock.setblocking(False) + + if local_addr: + sock.bind(local_address) + if remote_addr: + yield from self.sock_connect(sock, remote_address) + r_addr = remote_address + except OSError as exc: + if sock is not None: + sock.close() + exceptions.append(exc) + except: + if sock is not None: + sock.close() + raise + else: + break + else: + raise exceptions[0] + + protocol = protocol_factory() + waiter = self.create_future() + transport = self._make_datagram_transport( + sock, protocol, r_addr, waiter) + if self._debug: + if local_addr: + logger.info("Datagram endpoint local_addr=%r remote_addr=%r " + "created: (%r, %r)", + local_addr, remote_addr, transport, protocol) + else: + logger.debug("Datagram endpoint remote_addr=%r created: " + "(%r, %r)", + remote_addr, transport, protocol) + + try: + yield from waiter + except: + transport.close() + raise + + return transport, protocol + + @coroutine + def _create_server_getaddrinfo(self, host, port, family, flags): + infos = yield from _ensure_resolved((host, port), family=family, + type=socket.SOCK_STREAM, + flags=flags, loop=self) + if not infos: + raise OSError('getaddrinfo({!r}) returned empty list'.format(host)) + return infos + + @coroutine + def create_server(self, protocol_factory, host=None, port=None, + *, + family=socket.AF_UNSPEC, + flags=socket.AI_PASSIVE, + sock=None, + backlog=100, + ssl=None, + reuse_address=None, + reuse_port=None): + """Create a TCP server. + + The host parameter can be a string, in that case the TCP server is bound + to host and port. + + The host parameter can also be a sequence of strings and in that case + the TCP server is bound to all hosts of the sequence. If a host + appears multiple times (possibly indirectly e.g. when hostnames + resolve to the same IP address), the server is only bound once to that + host. 
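+
+        For example, binding one server to both loopback addresses
+        could look like this (``EchoProtocol`` is a placeholder for
+        any protocol factory)::
+
+            server = yield from loop.create_server(
+                EchoProtocol, ['127.0.0.1', '::1'], 8080)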
+ + Return a Server object which can be used to stop the service. + + This method is a coroutine. + """ + if isinstance(ssl, bool): + raise TypeError('ssl argument must be an SSLContext or None') + if host is not None or port is not None: + if sock is not None: + raise ValueError( + 'host/port and sock can not be specified at the same time') + + AF_INET6 = getattr(socket, 'AF_INET6', 0) + if reuse_address is None: + reuse_address = os.name == 'posix' and sys.platform != 'cygwin' + sockets = [] + if host == '': + hosts = [None] + elif (isinstance(host, str) or + not isinstance(host, collections.Iterable)): + hosts = [host] + else: + hosts = host + + fs = [self._create_server_getaddrinfo(host, port, family=family, + flags=flags) + for host in hosts] + infos = yield from tasks.gather(*fs, loop=self) + infos = set(itertools.chain.from_iterable(infos)) + + completed = False + try: + for res in infos: + af, socktype, proto, canonname, sa = res + try: + sock = socket.socket(af, socktype, proto) + except socket.error: + # Assume it's a bad family/type/protocol combination. + if self._debug: + logger.warning('create_server() failed to create ' + 'socket.socket(%r, %r, %r)', + af, socktype, proto, exc_info=True) + continue + sockets.append(sock) + if reuse_address: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR, True) + if reuse_port: + _set_reuseport(sock) + # Disable IPv4/IPv6 dual stack support (enabled by + # default on Linux) which makes a single socket + # listen on both address families. + if af == AF_INET6 and hasattr(socket, 'IPPROTO_IPV6'): + sock.setsockopt(socket.IPPROTO_IPV6, + socket.IPV6_V6ONLY, + True) + try: + sock.bind(sa) + except OSError as err: + raise OSError(err.errno, 'error while attempting ' + 'to bind on address %r: %s' + % (sa, err.strerror.lower())) + completed = True + finally: + if not completed: + for sock in sockets: + sock.close() + else: + if sock is None: + raise ValueError('Neither host/port nor sock were specified') + if not _is_stream_socket(sock): + raise ValueError( + 'A Stream Socket was expected, got {!r}'.format(sock)) + sockets = [sock] + + server = Server(self, sockets) + for sock in sockets: + sock.listen(backlog) + sock.setblocking(False) + self._start_serving(protocol_factory, sock, ssl, server, backlog) + if self._debug: + logger.info("%r is serving", server) + return server + + @coroutine + def connect_accepted_socket(self, protocol_factory, sock, *, ssl=None): + """Handle an accepted connection. + + This is used by servers that accept connections outside of + asyncio but that use asyncio to handle connections. + + This method is a coroutine. When completed, the coroutine + returns a (transport, protocol) pair. 
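+
+        A sketch of the intended use (``listener`` and ``EchoProtocol``
+        are placeholders for an externally managed listening socket and
+        a protocol factory)::
+
+            conn, _ = listener.accept()
+            transport, protocol = yield from loop.connect_accepted_socket(
+                EchoProtocol, conn)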
+ """ + if not _is_stream_socket(sock): + raise ValueError( + 'A Stream Socket was expected, got {!r}'.format(sock)) + + transport, protocol = yield from self._create_connection_transport( + sock, protocol_factory, ssl, '', server_side=True) + if self._debug: + # Get the socket from the transport because SSL transport closes + # the old socket and creates a new SSL socket + sock = transport.get_extra_info('socket') + logger.debug("%r handled: (%r, %r)", sock, transport, protocol) + return transport, protocol + + @coroutine + def connect_read_pipe(self, protocol_factory, pipe): + protocol = protocol_factory() + waiter = self.create_future() + transport = self._make_read_pipe_transport(pipe, protocol, waiter) + + try: + yield from waiter + except: + transport.close() + raise + + if self._debug: + logger.debug('Read pipe %r connected: (%r, %r)', + pipe.fileno(), transport, protocol) + return transport, protocol + + @coroutine + def connect_write_pipe(self, protocol_factory, pipe): + protocol = protocol_factory() + waiter = self.create_future() + transport = self._make_write_pipe_transport(pipe, protocol, waiter) + + try: + yield from waiter + except: + transport.close() + raise + + if self._debug: + logger.debug('Write pipe %r connected: (%r, %r)', + pipe.fileno(), transport, protocol) + return transport, protocol + + def _log_subprocess(self, msg, stdin, stdout, stderr): + info = [msg] + if stdin is not None: + info.append('stdin=%s' % _format_pipe(stdin)) + if stdout is not None and stderr == subprocess.STDOUT: + info.append('stdout=stderr=%s' % _format_pipe(stdout)) + else: + if stdout is not None: + info.append('stdout=%s' % _format_pipe(stdout)) + if stderr is not None: + info.append('stderr=%s' % _format_pipe(stderr)) + logger.debug(' '.join(info)) + + @coroutine + def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + universal_newlines=False, shell=True, bufsize=0, + **kwargs): + if not isinstance(cmd, (bytes, str)): + raise ValueError("cmd must be a string") + if universal_newlines: + raise ValueError("universal_newlines must be False") + if not shell: + raise ValueError("shell must be True") + if bufsize != 0: + raise ValueError("bufsize must be 0") + protocol = protocol_factory() + if self._debug: + # don't log parameters: they may contain sensitive information + # (password) and may be too long + debug_log = 'run shell command %r' % cmd + self._log_subprocess(debug_log, stdin, stdout, stderr) + transport = yield from self._make_subprocess_transport( + protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs) + if self._debug: + logger.info('%s: %r', debug_log, transport) + return transport, protocol + + @coroutine + def subprocess_exec(self, protocol_factory, program, *args, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=False, + shell=False, bufsize=0, **kwargs): + if universal_newlines: + raise ValueError("universal_newlines must be False") + if shell: + raise ValueError("shell must be False") + if bufsize != 0: + raise ValueError("bufsize must be 0") + popen_args = (program,) + args + for arg in popen_args: + if not isinstance(arg, (str, bytes)): + raise TypeError("program arguments must be " + "a bytes or text string, not %s" + % type(arg).__name__) + protocol = protocol_factory() + if self._debug: + # don't log parameters: they may contain sensitive information + # (password) and may be too long + debug_log = 'execute program %r' % program + 
self._log_subprocess(debug_log, stdin, stdout, stderr)
+        transport = yield from self._make_subprocess_transport(
+            protocol, popen_args, False, stdin, stdout, stderr,
+            bufsize, **kwargs)
+        if self._debug:
+            logger.info('%s: %r', debug_log, transport)
+        return transport, protocol
+
+    def get_exception_handler(self):
+        """Return an exception handler, or None if the default one is in use.
+        """
+        return self._exception_handler
+
+    def set_exception_handler(self, handler):
+        """Set handler as the new event loop exception handler.
+
+        If handler is None, the default exception handler will
+        be set.
+
+        If handler is a callable object, it should have a
+        signature matching '(loop, context)', where 'loop'
+        will be a reference to the active event loop, 'context'
+        will be a dict object (see `call_exception_handler()`
+        documentation for details about context).
+        """
+        if handler is not None and not callable(handler):
+            raise TypeError('A callable object or None is expected, '
+                            'got {!r}'.format(handler))
+        self._exception_handler = handler
+
+    def default_exception_handler(self, context):
+        """Default exception handler.
+
+        This is called when an exception occurs and no exception
+        handler is set, and can be called by a custom exception
+        handler that wants to defer to the default behavior.
+
+        The context parameter has the same meaning as in
+        `call_exception_handler()`.
+        """
+        message = context.get('message')
+        if not message:
+            message = 'Unhandled exception in event loop'
+
+        exception = context.get('exception')
+        if exception is not None:
+            exc_info = (type(exception), exception, exception.__traceback__)
+        else:
+            exc_info = False
+
+        if ('source_traceback' not in context
+                and self._current_handle is not None
+                and self._current_handle._source_traceback):
+            context['handle_traceback'] = self._current_handle._source_traceback
+
+        log_lines = [message]
+        for key in sorted(context):
+            if key in {'message', 'exception'}:
+                continue
+            value = context[key]
+            if key == 'source_traceback':
+                tb = ''.join(traceback.format_list(value))
+                value = 'Object created at (most recent call last):\n'
+                value += tb.rstrip()
+            elif key == 'handle_traceback':
+                tb = ''.join(traceback.format_list(value))
+                value = 'Handle created at (most recent call last):\n'
+                value += tb.rstrip()
+            else:
+                value = repr(value)
+            log_lines.append('{}: {}'.format(key, value))
+
+        logger.error('\n'.join(log_lines), exc_info=exc_info)
+
+    def call_exception_handler(self, context):
+        """Call the current event loop's exception handler.
+
+        The context argument is a dict containing the following keys:
+
+        - 'message': Error message;
+        - 'exception' (optional): Exception object;
+        - 'future' (optional): Future instance;
+        - 'handle' (optional): Handle instance;
+        - 'protocol' (optional): Protocol instance;
+        - 'transport' (optional): Transport instance;
+        - 'socket' (optional): Socket instance;
+        - 'asyncgen' (optional): Asynchronous generator that caused
+          the exception.
+
+        New keys may be introduced in the future.
+
+        Note: do not overload this method in an event loop subclass.
+        For custom exception handling, use the
+        `set_exception_handler()` method.
+        """
+        if self._exception_handler is None:
+            try:
+                self.default_exception_handler(context)
+            except Exception:
+                # Second protection layer for unexpected errors
+                # in the default implementation, as well as for subclassed
+                # event loops with overloaded "default_exception_handler".
+ logger.error('Exception in default exception handler', + exc_info=True) + else: + try: + self._exception_handler(self, context) + except Exception as exc: + # Exception in the user set custom exception handler. + try: + # Let's try default handler. + self.default_exception_handler({ + 'message': 'Unhandled error in exception handler', + 'exception': exc, + 'context': context, + }) + except Exception: + # Guard 'default_exception_handler' in case it is + # overloaded. + logger.error('Exception in default exception handler ' + 'while handling an unexpected error ' + 'in custom exception handler', + exc_info=True) + + def _add_callback(self, handle): + """Add a Handle to _scheduled (TimerHandle) or _ready.""" + assert isinstance(handle, events.Handle), 'A Handle is required here' + if handle._cancelled: + return + assert not isinstance(handle, events.TimerHandle) + self._ready.append(handle) + + def _add_callback_signalsafe(self, handle): + """Like _add_callback() but called from a signal handler.""" + self._add_callback(handle) + self._write_to_self() + + def _timer_handle_cancelled(self, handle): + """Notification that a TimerHandle has been cancelled.""" + if handle._scheduled: + self._timer_cancelled_count += 1 + + def _run_once(self): + """Run one full iteration of the event loop. + + This calls all currently ready callbacks, polls for I/O, + schedules the resulting callbacks, and finally schedules + 'call_later' callbacks. + """ + + sched_count = len(self._scheduled) + if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and + self._timer_cancelled_count / sched_count > + _MIN_CANCELLED_TIMER_HANDLES_FRACTION): + # Remove delayed calls that were cancelled if their number + # is too high + new_scheduled = [] + for handle in self._scheduled: + if handle._cancelled: + handle._scheduled = False + else: + new_scheduled.append(handle) + + heapq.heapify(new_scheduled) + self._scheduled = new_scheduled + self._timer_cancelled_count = 0 + else: + # Remove delayed calls that were cancelled from head of queue. + while self._scheduled and self._scheduled[0]._cancelled: + self._timer_cancelled_count -= 1 + handle = heapq.heappop(self._scheduled) + handle._scheduled = False + + timeout = None + if self._ready or self._stopping: + timeout = 0 + elif self._scheduled: + # Compute the desired timeout. + when = self._scheduled[0]._when + timeout = max(0, when - self.time()) + + if self._debug and timeout != 0: + t0 = self.time() + event_list = self._selector.select(timeout) + dt = self.time() - t0 + if dt >= 1.0: + level = logging.INFO + else: + level = logging.DEBUG + nevent = len(event_list) + if timeout is None: + logger.log(level, 'poll took %.3f ms: %s events', + dt * 1e3, nevent) + elif nevent: + logger.log(level, + 'poll %.3f ms took %.3f ms: %s events', + timeout * 1e3, dt * 1e3, nevent) + elif dt >= 1.0: + logger.log(level, + 'poll %.3f ms took %.3f ms: timeout', + timeout * 1e3, dt * 1e3) + else: + event_list = self._selector.select(timeout) + self._process_events(event_list) + + # Handle 'later' callbacks that are ready. + end_time = self.time() + self._clock_resolution + while self._scheduled: + handle = self._scheduled[0] + if handle._when >= end_time: + break + handle = heapq.heappop(self._scheduled) + handle._scheduled = False + self._ready.append(handle) + + # This is the only place where callbacks are actually *called*. + # All other places just add them to ready. 
+ # Note: We run all currently scheduled callbacks, but not any + # callbacks scheduled by callbacks run this time around -- + # they will be run the next time (after another I/O poll). + # Use an idiom that is thread-safe without using locks. + ntodo = len(self._ready) + for i in range(ntodo): + handle = self._ready.popleft() + if handle._cancelled: + continue + if self._debug: + try: + self._current_handle = handle + t0 = self.time() + handle._run() + dt = self.time() - t0 + if dt >= self.slow_callback_duration: + logger.warning('Executing %s took %.3f seconds', + _format_handle(handle), dt) + finally: + self._current_handle = None + else: + handle._run() + handle = None # Needed to break cycles when an exception occurs. + + def _set_coroutine_wrapper(self, enabled): + try: + set_wrapper = sys.set_coroutine_wrapper + get_wrapper = sys.get_coroutine_wrapper + except AttributeError: + return + + enabled = bool(enabled) + if self._coroutine_wrapper_set == enabled: + return + + wrapper = coroutines.debug_wrapper + current_wrapper = get_wrapper() + + if enabled: + if current_wrapper not in (None, wrapper): + warnings.warn( + "loop.set_debug(True): cannot set debug coroutine " + "wrapper; another wrapper is already set %r" % + current_wrapper, RuntimeWarning) + else: + set_wrapper(wrapper) + self._coroutine_wrapper_set = True + else: + if current_wrapper not in (None, wrapper): + warnings.warn( + "loop.set_debug(False): cannot unset debug coroutine " + "wrapper; another wrapper was set %r" % + current_wrapper, RuntimeWarning) + else: + set_wrapper(None) + self._coroutine_wrapper_set = False + + def get_debug(self): + return self._debug + + def set_debug(self, enabled): + self._debug = enabled + + if self.is_running(): + self._set_coroutine_wrapper(enabled) diff --git a/Lib/asyncio/base_futures.py b/Lib/asyncio/base_futures.py new file mode 100644 index 0000000000..01259a062e --- /dev/null +++ b/Lib/asyncio/base_futures.py @@ -0,0 +1,71 @@ +__all__ = [] + +import concurrent.futures._base +import reprlib + +from . import events + +Error = concurrent.futures._base.Error +CancelledError = concurrent.futures.CancelledError +TimeoutError = concurrent.futures.TimeoutError + + +class InvalidStateError(Error): + """The operation is not allowed in this state.""" + + +# States for Future. +_PENDING = 'PENDING' +_CANCELLED = 'CANCELLED' +_FINISHED = 'FINISHED' + + +def isfuture(obj): + """Check for a Future. + + This returns True when obj is a Future instance or is advertising + itself as duck-type compatible by setting _asyncio_future_blocking. + See comment in Future for more details. 
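+
+    For example, a minimal duck-typed object only needs the marker
+    attribute (a sketch, not a full Future implementation)::
+
+        class FutureLike:
+            _asyncio_future_blocking = False
+
+        assert isfuture(FutureLike())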
+ """ + return (hasattr(obj.__class__, '_asyncio_future_blocking') and + obj._asyncio_future_blocking is not None) + + +def _format_callbacks(cb): + """helper function for Future.__repr__""" + size = len(cb) + if not size: + cb = '' + + def format_cb(callback): + return events._format_callback_source(callback, ()) + + if size == 1: + cb = format_cb(cb[0]) + elif size == 2: + cb = '{}, {}'.format(format_cb(cb[0]), format_cb(cb[1])) + elif size > 2: + cb = '{}, <{} more>, {}'.format(format_cb(cb[0]), + size - 2, + format_cb(cb[-1])) + return 'cb=[%s]' % cb + + +def _future_repr_info(future): + # (Future) -> str + """helper function for Future.__repr__""" + info = [future._state.lower()] + if future._state == _FINISHED: + if future._exception is not None: + info.append('exception={!r}'.format(future._exception)) + else: + # use reprlib to limit the length of the output, especially + # for very long strings + result = reprlib.repr(future._result) + info.append('result={}'.format(result)) + if future._callbacks: + info.append(_format_callbacks(future._callbacks)) + if future._source_traceback: + frame = future._source_traceback[-1] + info.append('created at %s:%s' % (frame[0], frame[1])) + return info diff --git a/Lib/asyncio/base_subprocess.py b/Lib/asyncio/base_subprocess.py new file mode 100644 index 0000000000..a00d9d5732 --- /dev/null +++ b/Lib/asyncio/base_subprocess.py @@ -0,0 +1,293 @@ +import collections +import subprocess +import warnings + +from . import compat +from . import protocols +from . import transports +from .coroutines import coroutine +from .log import logger + + +class BaseSubprocessTransport(transports.SubprocessTransport): + + def __init__(self, loop, protocol, args, shell, + stdin, stdout, stderr, bufsize, + waiter=None, extra=None, **kwargs): + super().__init__(extra) + self._closed = False + self._protocol = protocol + self._loop = loop + self._proc = None + self._pid = None + self._returncode = None + self._exit_waiters = [] + self._pending_calls = collections.deque() + self._pipes = {} + self._finished = False + + if stdin == subprocess.PIPE: + self._pipes[0] = None + if stdout == subprocess.PIPE: + self._pipes[1] = None + if stderr == subprocess.PIPE: + self._pipes[2] = None + + # Create the child process: set the _proc attribute + try: + self._start(args=args, shell=shell, stdin=stdin, stdout=stdout, + stderr=stderr, bufsize=bufsize, **kwargs) + except: + self.close() + raise + + self._pid = self._proc.pid + self._extra['subprocess'] = self._proc + + if self._loop.get_debug(): + if isinstance(args, (bytes, str)): + program = args + else: + program = args[0] + logger.debug('process %r created: pid %s', + program, self._pid) + + self._loop.create_task(self._connect_pipes(waiter)) + + def __repr__(self): + info = [self.__class__.__name__] + if self._closed: + info.append('closed') + if self._pid is not None: + info.append('pid=%s' % self._pid) + if self._returncode is not None: + info.append('returncode=%s' % self._returncode) + elif self._pid is not None: + info.append('running') + else: + info.append('not started') + + stdin = self._pipes.get(0) + if stdin is not None: + info.append('stdin=%s' % stdin.pipe) + + stdout = self._pipes.get(1) + stderr = self._pipes.get(2) + if stdout is not None and stderr is stdout: + info.append('stdout=stderr=%s' % stdout.pipe) + else: + if stdout is not None: + info.append('stdout=%s' % stdout.pipe) + if stderr is not None: + info.append('stderr=%s' % stderr.pipe) + + return '<%s>' % ' '.join(info) + + def _start(self, args, 
shell, stdin, stdout, stderr, bufsize, **kwargs):
+        raise NotImplementedError
+
+    def set_protocol(self, protocol):
+        self._protocol = protocol
+
+    def get_protocol(self):
+        return self._protocol
+
+    def is_closing(self):
+        return self._closed
+
+    def close(self):
+        if self._closed:
+            return
+        self._closed = True
+
+        for proto in self._pipes.values():
+            if proto is None:
+                continue
+            proto.pipe.close()
+
+        if (self._proc is not None
+                # the child process finished?
+                and self._returncode is None
+                # the child process finished but the transport was not notified yet?
+                and self._proc.poll() is None
+                ):
+            if self._loop.get_debug():
+                logger.warning('Close running child process: kill %r', self)
+
+            try:
+                self._proc.kill()
+            except ProcessLookupError:
+                pass
+
+        # Don't clear the _proc reference yet: _post_init() may still run
+
+    # On Python 3.3 and older, objects with a destructor part of a reference
+    # cycle are never destroyed. That is no longer the case on Python 3.4,
+    # thanks to PEP 442.
+    if compat.PY34:
+        def __del__(self):
+            if not self._closed:
+                warnings.warn("unclosed transport %r" % self, ResourceWarning,
+                              source=self)
+                self.close()
+
+    def get_pid(self):
+        return self._pid
+
+    def get_returncode(self):
+        return self._returncode
+
+    def get_pipe_transport(self, fd):
+        if fd in self._pipes:
+            return self._pipes[fd].pipe
+        else:
+            return None
+
+    def _check_proc(self):
+        if self._proc is None:
+            raise ProcessLookupError()
+
+    def send_signal(self, signal):
+        self._check_proc()
+        self._proc.send_signal(signal)
+
+    def terminate(self):
+        self._check_proc()
+        self._proc.terminate()
+
+    def kill(self):
+        self._check_proc()
+        self._proc.kill()
+
+    @coroutine
+    def _connect_pipes(self, waiter):
+        try:
+            proc = self._proc
+            loop = self._loop
+
+            if proc.stdin is not None:
+                _, pipe = yield from loop.connect_write_pipe(
+                    lambda: WriteSubprocessPipeProto(self, 0),
+                    proc.stdin)
+                self._pipes[0] = pipe
+
+            if proc.stdout is not None:
+                _, pipe = yield from loop.connect_read_pipe(
+                    lambda: ReadSubprocessPipeProto(self, 1),
+                    proc.stdout)
+                self._pipes[1] = pipe
+
+            if proc.stderr is not None:
+                _, pipe = yield from loop.connect_read_pipe(
+                    lambda: ReadSubprocessPipeProto(self, 2),
+                    proc.stderr)
+                self._pipes[2] = pipe
+
+            assert self._pending_calls is not None
+
+            loop.call_soon(self._protocol.connection_made, self)
+            for callback, data in self._pending_calls:
+                loop.call_soon(callback, *data)
+            self._pending_calls = None
+        except Exception as exc:
+            if waiter is not None and not waiter.cancelled():
+                waiter.set_exception(exc)
+        else:
+            if waiter is not None and not waiter.cancelled():
+                waiter.set_result(None)
+
+    def _call(self, cb, *data):
+        if self._pending_calls is not None:
+            self._pending_calls.append((cb, data))
+        else:
+            self._loop.call_soon(cb, *data)
+
+    def _pipe_connection_lost(self, fd, exc):
+        self._call(self._protocol.pipe_connection_lost, fd, exc)
+        self._try_finish()
+
+    def _pipe_data_received(self, fd, data):
+        self._call(self._protocol.pipe_data_received, fd, data)
+
+    def _process_exited(self, returncode):
+        assert returncode is not None, returncode
+        assert self._returncode is None, self._returncode
+        if self._loop.get_debug():
+            logger.info('%r exited with return code %r',
+                        self, returncode)
+        self._returncode = returncode
+        if self._proc.returncode is None:
+            # asyncio uses a child watcher: copy the status into the Popen
+            # object. On Python 3.6, it is required to avoid a ResourceWarning.
+ self._proc.returncode = returncode + self._call(self._protocol.process_exited) + self._try_finish() + + # wake up futures waiting for wait() + for waiter in self._exit_waiters: + if not waiter.cancelled(): + waiter.set_result(returncode) + self._exit_waiters = None + + @coroutine + def _wait(self): + """Wait until the process exit and return the process return code. + + This method is a coroutine.""" + if self._returncode is not None: + return self._returncode + + waiter = self._loop.create_future() + self._exit_waiters.append(waiter) + return (yield from waiter) + + def _try_finish(self): + assert not self._finished + if self._returncode is None: + return + if all(p is not None and p.disconnected + for p in self._pipes.values()): + self._finished = True + self._call(self._call_connection_lost, None) + + def _call_connection_lost(self, exc): + try: + self._protocol.connection_lost(exc) + finally: + self._loop = None + self._proc = None + self._protocol = None + + +class WriteSubprocessPipeProto(protocols.BaseProtocol): + + def __init__(self, proc, fd): + self.proc = proc + self.fd = fd + self.pipe = None + self.disconnected = False + + def connection_made(self, transport): + self.pipe = transport + + def __repr__(self): + return ('<%s fd=%s pipe=%r>' + % (self.__class__.__name__, self.fd, self.pipe)) + + def connection_lost(self, exc): + self.disconnected = True + self.proc._pipe_connection_lost(self.fd, exc) + self.proc = None + + def pause_writing(self): + self.proc._protocol.pause_writing() + + def resume_writing(self): + self.proc._protocol.resume_writing() + + +class ReadSubprocessPipeProto(WriteSubprocessPipeProto, + protocols.Protocol): + + def data_received(self, data): + self.proc._pipe_data_received(self.fd, data) diff --git a/Lib/asyncio/base_tasks.py b/Lib/asyncio/base_tasks.py new file mode 100644 index 0000000000..5f34434c57 --- /dev/null +++ b/Lib/asyncio/base_tasks.py @@ -0,0 +1,76 @@ +import linecache +import traceback + +from . import base_futures +from . 
import coroutines + + +def _task_repr_info(task): + info = base_futures._future_repr_info(task) + + if task._must_cancel: + # replace status + info[0] = 'cancelling' + + coro = coroutines._format_coroutine(task._coro) + info.insert(1, 'coro=<%s>' % coro) + + if task._fut_waiter is not None: + info.insert(2, 'wait_for=%r' % task._fut_waiter) + return info + + +def _task_get_stack(task, limit): + frames = [] + try: + # 'async def' coroutines + f = task._coro.cr_frame + except AttributeError: + f = task._coro.gi_frame + if f is not None: + while f is not None: + if limit is not None: + if limit <= 0: + break + limit -= 1 + frames.append(f) + f = f.f_back + frames.reverse() + elif task._exception is not None: + tb = task._exception.__traceback__ + while tb is not None: + if limit is not None: + if limit <= 0: + break + limit -= 1 + frames.append(tb.tb_frame) + tb = tb.tb_next + return frames + + +def _task_print_stack(task, limit, file): + extracted_list = [] + checked = set() + for f in task.get_stack(limit=limit): + lineno = f.f_lineno + co = f.f_code + filename = co.co_filename + name = co.co_name + if filename not in checked: + checked.add(filename) + linecache.checkcache(filename) + line = linecache.getline(filename, lineno, f.f_globals) + extracted_list.append((filename, lineno, name, line)) + exc = task._exception + if not extracted_list: + print('No stack for %r' % task, file=file) + elif exc is not None: + print('Traceback for %r (most recent call last):' % task, + file=file) + else: + print('Stack for %r (most recent call last):' % task, + file=file) + traceback.print_list(extracted_list, file=file) + if exc is not None: + for line in traceback.format_exception_only(exc.__class__, exc): + print(line, file=file, end='') diff --git a/Lib/asyncio/compat.py b/Lib/asyncio/compat.py new file mode 100644 index 0000000000..4790bb4a35 --- /dev/null +++ b/Lib/asyncio/compat.py @@ -0,0 +1,18 @@ +"""Compatibility helpers for the different Python versions.""" + +import sys + +PY34 = sys.version_info >= (3, 4) +PY35 = sys.version_info >= (3, 5) +PY352 = sys.version_info >= (3, 5, 2) + + +def flatten_list_bytes(list_of_data): + """Concatenate a sequence of bytes-like objects.""" + if not PY34: + # On Python 3.3 and older, bytes.join() doesn't handle + # memoryview. + list_of_data = ( + bytes(data) if isinstance(data, memoryview) else data + for data in list_of_data) + return b''.join(list_of_data) diff --git a/Lib/asyncio/constants.py b/Lib/asyncio/constants.py new file mode 100644 index 0000000000..f9e123281e --- /dev/null +++ b/Lib/asyncio/constants.py @@ -0,0 +1,7 @@ +"""Constants.""" + +# After the connection is lost, log warnings after this many write()s. +LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5 + +# Seconds to wait before retrying accept(). +ACCEPT_RETRY_DELAY = 1 diff --git a/Lib/asyncio/coroutines.py b/Lib/asyncio/coroutines.py new file mode 100644 index 0000000000..08e94412b3 --- /dev/null +++ b/Lib/asyncio/coroutines.py @@ -0,0 +1,344 @@ +__all__ = ['coroutine', + 'iscoroutinefunction', 'iscoroutine'] + +import functools +import inspect +import opcode +import os +import sys +import traceback +import types + +from . import compat +from . import events +from . import base_futures +from .log import logger + + +# Opcode of "yield from" instruction +_YIELD_FROM = opcode.opmap['YIELD_FROM'] + +# If you set _DEBUG to true, @coroutine will wrap the resulting +# generator objects in a CoroWrapper instance (defined below). 
That
+# instance will log a message when the generator is never iterated
+# over, which may happen when you forget to use "yield from" with a
+# coroutine call.  Note that the value of the _DEBUG flag is taken
+# when the decorator is used, so to be of any use it must be set
+# before you define your coroutines.  A downside of using this feature
+# is that tracebacks show entries for the CoroWrapper.__next__ method
+# when _DEBUG is true.
+_DEBUG = (not sys.flags.ignore_environment and
+          bool(os.environ.get('PYTHONASYNCIODEBUG')))
+
+
+try:
+    _types_coroutine = types.coroutine
+    _types_CoroutineType = types.CoroutineType
+except AttributeError:
+    # Python 3.4
+    _types_coroutine = None
+    _types_CoroutineType = None
+
+try:
+    _inspect_iscoroutinefunction = inspect.iscoroutinefunction
+except AttributeError:
+    # Python 3.4
+    _inspect_iscoroutinefunction = lambda func: False
+
+try:
+    from collections.abc import Coroutine as _CoroutineABC, \
+                                Awaitable as _AwaitableABC
+except ImportError:
+    _CoroutineABC = _AwaitableABC = None
+
+
+# Check for CPython issue #21209
+def has_yield_from_bug():
+    class MyGen:
+        def __init__(self):
+            self.send_args = None
+        def __iter__(self):
+            return self
+        def __next__(self):
+            return 42
+        def send(self, *what):
+            self.send_args = what
+            return None
+    def yield_from_gen(gen):
+        yield from gen
+    value = (1, 2, 3)
+    gen = MyGen()
+    coro = yield_from_gen(gen)
+    next(coro)
+    coro.send(value)
+    return gen.send_args != (value,)
+_YIELD_FROM_BUG = has_yield_from_bug()
+del has_yield_from_bug
+
+
+def debug_wrapper(gen):
+    # This function is called from 'sys.set_coroutine_wrapper'.
+    # We only wrap here coroutines defined via 'async def' syntax.
+    # Generator-based coroutines are wrapped in @coroutine
+    # decorator.
+    return CoroWrapper(gen, None)
+
+
+class CoroWrapper:
+    # Wrapper for coroutine object in _DEBUG mode.
+
+    def __init__(self, gen, func=None):
+        assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
+        self.gen = gen
+        self.func = func  # Used to unwrap @coroutine decorator
+        self._source_traceback = traceback.extract_stack(sys._getframe(1))
+        self.__name__ = getattr(gen, '__name__', None)
+        self.__qualname__ = getattr(gen, '__qualname__', None)
+
+    def __repr__(self):
+        coro_repr = _format_coroutine(self)
+        if self._source_traceback:
+            frame = self._source_traceback[-1]
+            coro_repr += ', created at %s:%s' % (frame[0], frame[1])
+        return '<%s %s>' % (self.__class__.__name__, coro_repr)
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        return self.gen.send(None)
+
+    if _YIELD_FROM_BUG:
+        # For CPython issue #21209: using "yield from" and a custom
+        # generator, generator.send(tuple) unpacks the tuple instead of passing
+        # the tuple unchanged. Check if the caller is a generator using "yield
+        # from" to decide if the parameter should be unpacked or not.
+        def send(self, *value):
+            frame = sys._getframe()
+            caller = frame.f_back
+            assert caller.f_lasti >= 0
+            if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
+                value = value[0]
+            return self.gen.send(value)
+    else:
+        def send(self, value):
+            return self.gen.send(value)
+
+    def throw(self, type, value=None, traceback=None):
+        return self.gen.throw(type, value, traceback)
+
+    def close(self):
+        return self.gen.close()
+
+    @property
+    def gi_frame(self):
+        return self.gen.gi_frame
+
+    @property
+    def gi_running(self):
+        return self.gen.gi_running
+
+    @property
+    def gi_code(self):
+        return self.gen.gi_code
+
+    if compat.PY35:
+
+        def __await__(self):
+            cr_await = getattr(self.gen, 'cr_await', None)
+            if cr_await is not None:
+                raise RuntimeError(
+                    "Cannot await on coroutine {!r} while it's "
+                    "awaiting for {!r}".format(self.gen, cr_await))
+            return self
+
+        @property
+        def gi_yieldfrom(self):
+            return self.gen.gi_yieldfrom
+
+        @property
+        def cr_await(self):
+            return self.gen.cr_await
+
+        @property
+        def cr_running(self):
+            return self.gen.cr_running
+
+        @property
+        def cr_code(self):
+            return self.gen.cr_code
+
+        @property
+        def cr_frame(self):
+            return self.gen.cr_frame
+
+    def __del__(self):
+        # Be careful accessing self.gen.frame -- self.gen might not exist.
+        gen = getattr(self, 'gen', None)
+        frame = getattr(gen, 'gi_frame', None)
+        if frame is None:
+            frame = getattr(gen, 'cr_frame', None)
+        if frame is not None and frame.f_lasti == -1:
+            msg = '%r was never yielded from' % self
+            tb = getattr(self, '_source_traceback', ())
+            if tb:
+                tb = ''.join(traceback.format_list(tb))
+                msg += ('\nCoroutine object created at '
+                        '(most recent call last):\n')
+                msg += tb.rstrip()
+            logger.error(msg)
+
+
+def coroutine(func):
+    """Decorator to mark coroutines.
+
+    If the coroutine is not yielded from before it is destroyed,
+    an error message is logged.
+    """
+    if _inspect_iscoroutinefunction(func):
+        # In Python 3.5 that's all we need to do for coroutines
+        # defined with "async def".
+        # Wrapping in CoroWrapper will happen via
+        # 'sys.set_coroutine_wrapper' function.
+        return func
+
+    if inspect.isgeneratorfunction(func):
+        coro = func
+    else:
+        @functools.wraps(func)
+        def coro(*args, **kw):
+            res = func(*args, **kw)
+            if (base_futures.isfuture(res) or inspect.isgenerator(res) or
+                    isinstance(res, CoroWrapper)):
+                res = yield from res
+            elif _AwaitableABC is not None:
+                # If 'func' returns an Awaitable (new in 3.5) we
+                # want to run it.
+                try:
+                    await_meth = res.__await__
+                except AttributeError:
+                    pass
+                else:
+                    if isinstance(res, _AwaitableABC):
+                        res = yield from await_meth()
+            return res
+
+    if not _DEBUG:
+        if _types_coroutine is None:
+            wrapper = coro
+        else:
+            wrapper = _types_coroutine(coro)
+    else:
+        @functools.wraps(func)
+        def wrapper(*args, **kwds):
+            w = CoroWrapper(coro(*args, **kwds), func=func)
+            if w._source_traceback:
+                del w._source_traceback[-1]
+            # Python < 3.5 does not implement __qualname__
+            # on generator objects, so we set it manually.
+            # We use getattr as some callables (such as
+            # functools.partial) may lack __qualname__.
+            w.__name__ = getattr(func, '__name__', None)
+            w.__qualname__ = getattr(func, '__qualname__', None)
+            return w
+
+    wrapper._is_coroutine = _is_coroutine  # For iscoroutinefunction().
+    return wrapper
+
+
+# A marker for iscoroutinefunction.
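+# Using a fresh object() makes the test below a pure identity check: only
+# functions returned by the @coroutine decorator above carry this exact
+# object in their _is_coroutine attribute, so an unrelated attribute that
+# happens to be named _is_coroutine cannot produce a false positive.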
+_is_coroutine = object() + + +def iscoroutinefunction(func): + """Return True if func is a decorated coroutine function.""" + return (getattr(func, '_is_coroutine', None) is _is_coroutine or + _inspect_iscoroutinefunction(func)) + + +_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper) +if _CoroutineABC is not None: + _COROUTINE_TYPES += (_CoroutineABC,) +if _types_CoroutineType is not None: + # Prioritize native coroutine check to speed-up + # asyncio.iscoroutine. + _COROUTINE_TYPES = (_types_CoroutineType,) + _COROUTINE_TYPES + + +def iscoroutine(obj): + """Return True if obj is a coroutine object.""" + return isinstance(obj, _COROUTINE_TYPES) + + +def _format_coroutine(coro): + assert iscoroutine(coro) + + if not hasattr(coro, 'cr_code') and not hasattr(coro, 'gi_code'): + # Most likely a built-in type or a Cython coroutine. + + # Built-in types might not have __qualname__ or __name__. + coro_name = getattr( + coro, '__qualname__', + getattr(coro, '__name__', type(coro).__name__)) + coro_name = '{}()'.format(coro_name) + + running = False + try: + running = coro.cr_running + except AttributeError: + try: + running = coro.gi_running + except AttributeError: + pass + + if running: + return '{} running'.format(coro_name) + else: + return coro_name + + coro_name = None + if isinstance(coro, CoroWrapper): + func = coro.func + coro_name = coro.__qualname__ + if coro_name is not None: + coro_name = '{}()'.format(coro_name) + else: + func = coro + + if coro_name is None: + coro_name = events._format_callback(func, (), {}) + + try: + coro_code = coro.gi_code + except AttributeError: + coro_code = coro.cr_code + + try: + coro_frame = coro.gi_frame + except AttributeError: + coro_frame = coro.cr_frame + + filename = coro_code.co_filename + lineno = 0 + if (isinstance(coro, CoroWrapper) and + not inspect.isgeneratorfunction(coro.func) and + coro.func is not None): + source = events._get_function_source(coro.func) + if source is not None: + filename, lineno = source + if coro_frame is None: + coro_repr = ('%s done, defined at %s:%s' + % (coro_name, filename, lineno)) + else: + coro_repr = ('%s running, defined at %s:%s' + % (coro_name, filename, lineno)) + elif coro_frame is not None: + lineno = coro_frame.f_lineno + coro_repr = ('%s running at %s:%s' + % (coro_name, filename, lineno)) + else: + lineno = coro_code.co_firstlineno + coro_repr = ('%s done, defined at %s:%s' + % (coro_name, filename, lineno)) + + return coro_repr diff --git a/Lib/asyncio/events.py b/Lib/asyncio/events.py new file mode 100644 index 0000000000..28a45fc3cc --- /dev/null +++ b/Lib/asyncio/events.py @@ -0,0 +1,692 @@ +"""Event loop and event loop policy.""" + +__all__ = ['AbstractEventLoopPolicy', + 'AbstractEventLoop', 'AbstractServer', + 'Handle', 'TimerHandle', + 'get_event_loop_policy', 'set_event_loop_policy', + 'get_event_loop', 'set_event_loop', 'new_event_loop', + 'get_child_watcher', 'set_child_watcher', + '_set_running_loop', '_get_running_loop', + ] + +import functools +import inspect +import reprlib +import socket +import subprocess +import sys +import threading +import traceback + +from asyncio import compat + + +def _get_function_source(func): + if compat.PY34: + func = inspect.unwrap(func) + elif hasattr(func, '__wrapped__'): + func = func.__wrapped__ + if inspect.isfunction(func): + code = func.__code__ + return (code.co_filename, code.co_firstlineno) + if isinstance(func, functools.partial): + return _get_function_source(func.func) + if compat.PY34 and isinstance(func, functools.partialmethod): + return 
_get_function_source(func.func) + return None + + +def _format_args_and_kwargs(args, kwargs): + """Format function arguments and keyword arguments. + + Special case for a single parameter: ('hello',) is formatted as ('hello'). + """ + # use reprlib to limit the length of the output + items = [] + if args: + items.extend(reprlib.repr(arg) for arg in args) + if kwargs: + items.extend('{}={}'.format(k, reprlib.repr(v)) + for k, v in kwargs.items()) + return '(' + ', '.join(items) + ')' + + +def _format_callback(func, args, kwargs, suffix=''): + if isinstance(func, functools.partial): + suffix = _format_args_and_kwargs(args, kwargs) + suffix + return _format_callback(func.func, func.args, func.keywords, suffix) + + if hasattr(func, '__qualname__'): + func_repr = getattr(func, '__qualname__') + elif hasattr(func, '__name__'): + func_repr = getattr(func, '__name__') + else: + func_repr = repr(func) + + func_repr += _format_args_and_kwargs(args, kwargs) + if suffix: + func_repr += suffix + return func_repr + +def _format_callback_source(func, args): + func_repr = _format_callback(func, args, None) + source = _get_function_source(func) + if source: + func_repr += ' at %s:%s' % source + return func_repr + + +class Handle: + """Object returned by callback registration methods.""" + + __slots__ = ('_callback', '_args', '_cancelled', '_loop', + '_source_traceback', '_repr', '__weakref__') + + def __init__(self, callback, args, loop): + self._loop = loop + self._callback = callback + self._args = args + self._cancelled = False + self._repr = None + if self._loop.get_debug(): + self._source_traceback = traceback.extract_stack(sys._getframe(1)) + else: + self._source_traceback = None + + def _repr_info(self): + info = [self.__class__.__name__] + if self._cancelled: + info.append('cancelled') + if self._callback is not None: + info.append(_format_callback_source(self._callback, self._args)) + if self._source_traceback: + frame = self._source_traceback[-1] + info.append('created at %s:%s' % (frame[0], frame[1])) + return info + + def __repr__(self): + if self._repr is not None: + return self._repr + info = self._repr_info() + return '<%s>' % ' '.join(info) + + def cancel(self): + if not self._cancelled: + self._cancelled = True + if self._loop.get_debug(): + # Keep a representation in debug mode to keep callback and + # parameters. For example, to log the warning + # "Executing took 2.5 second" + self._repr = repr(self) + self._callback = None + self._args = None + + def _run(self): + try: + self._callback(*self._args) + except Exception as exc: + cb = _format_callback_source(self._callback, self._args) + msg = 'Exception in callback {}'.format(cb) + context = { + 'message': msg, + 'exception': exc, + 'handle': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + self = None # Needed to break cycles when an exception occurs. 
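+
+# Editor's sketch of the Handle contract (illustrative, not part of the
+# module): scheduling methods such as loop.call_soon(cb, *args) return a
+# Handle, and cancel() is its one public operation -- it marks the handle
+# so the loop skips it, and drops _callback/_args so reference cycles can
+# be collected:
+#
+#     handle = loop.call_soon(print, 'hello')
+#     handle.cancel()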
+ + +class TimerHandle(Handle): + """Object returned by timed callback registration methods.""" + + __slots__ = ['_scheduled', '_when'] + + def __init__(self, when, callback, args, loop): + assert when is not None + super().__init__(callback, args, loop) + if self._source_traceback: + del self._source_traceback[-1] + self._when = when + self._scheduled = False + + def _repr_info(self): + info = super()._repr_info() + pos = 2 if self._cancelled else 1 + info.insert(pos, 'when=%s' % self._when) + return info + + def __hash__(self): + return hash(self._when) + + def __lt__(self, other): + return self._when < other._when + + def __le__(self, other): + if self._when < other._when: + return True + return self.__eq__(other) + + def __gt__(self, other): + return self._when > other._when + + def __ge__(self, other): + if self._when > other._when: + return True + return self.__eq__(other) + + def __eq__(self, other): + if isinstance(other, TimerHandle): + return (self._when == other._when and + self._callback == other._callback and + self._args == other._args and + self._cancelled == other._cancelled) + return NotImplemented + + def __ne__(self, other): + equal = self.__eq__(other) + return NotImplemented if equal is NotImplemented else not equal + + def cancel(self): + if not self._cancelled: + self._loop._timer_handle_cancelled(self) + super().cancel() + + +class AbstractServer: + """Abstract server returned by create_server().""" + + def close(self): + """Stop serving. This leaves existing connections open.""" + return NotImplemented + + def wait_closed(self): + """Coroutine to wait until service is closed.""" + return NotImplemented + + +class AbstractEventLoop: + """Abstract event loop.""" + + # Running and stopping the event loop. + + def run_forever(self): + """Run the event loop until stop() is called.""" + raise NotImplementedError + + def run_until_complete(self, future): + """Run the event loop until a Future is done. + + Return the Future's result, or raise its exception. + """ + raise NotImplementedError + + def stop(self): + """Stop the event loop as soon as reasonable. + + Exactly how soon that is may depend on the implementation, but + no more I/O callbacks should be scheduled. + """ + raise NotImplementedError + + def is_running(self): + """Return whether the event loop is currently running.""" + raise NotImplementedError + + def is_closed(self): + """Returns True if the event loop was closed.""" + raise NotImplementedError + + def close(self): + """Close the loop. + + The loop should not be running. + + This is idempotent and irreversible. + + No other methods should be called after this one. + """ + raise NotImplementedError + + def shutdown_asyncgens(self): + """Shutdown all active asynchronous generators.""" + raise NotImplementedError + + # Methods scheduling callbacks. All these return Handles. + + def _timer_handle_cancelled(self, handle): + """Notification that a TimerHandle has been cancelled.""" + raise NotImplementedError + + def call_soon(self, callback, *args): + return self.call_later(0, callback, *args) + + def call_later(self, delay, callback, *args): + raise NotImplementedError + + def call_at(self, when, callback, *args): + raise NotImplementedError + + def time(self): + raise NotImplementedError + + def create_future(self): + raise NotImplementedError + + # Method scheduling a coroutine object: create a task. + + def create_task(self, coro): + raise NotImplementedError + + # Methods for interacting with threads. 
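+    # call_soon_threadsafe() is the scheduling entry point intended to be
+    # safe to invoke from outside the loop's own thread; a common
+    # (illustrative) pattern is handing a result back into the loop's
+    # thread from a worker thread:
+    #
+    #     loop.call_soon_threadsafe(fut.set_result, value)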
+
+    def call_soon_threadsafe(self, callback, *args):
+        raise NotImplementedError
+
+    def run_in_executor(self, executor, func, *args):
+        raise NotImplementedError
+
+    def set_default_executor(self, executor):
+        raise NotImplementedError
+
+    # Network I/O methods returning Futures.
+
+    def getaddrinfo(self, host, port, *, family=0, type=0, proto=0, flags=0):
+        raise NotImplementedError
+
+    def getnameinfo(self, sockaddr, flags=0):
+        raise NotImplementedError
+
+    def create_connection(self, protocol_factory, host=None, port=None, *,
+                          ssl=None, family=0, proto=0, flags=0, sock=None,
+                          local_addr=None, server_hostname=None):
+        raise NotImplementedError
+
+    def create_server(self, protocol_factory, host=None, port=None, *,
+                      family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
+                      sock=None, backlog=100, ssl=None, reuse_address=None,
+                      reuse_port=None):
+        """A coroutine which creates a TCP server bound to host and port.
+
+        The return value is a Server object which can be used to stop
+        the service.
+
+        If host is an empty string or None all interfaces are assumed
+        and a list of multiple sockets will be returned (most likely
+        one for IPv4 and another one for IPv6). The host parameter can
+        also be a sequence (e.g. list) of hosts to bind to.
+
+        family can be set to either AF_INET or AF_INET6 to force the
+        socket to use IPv4 or IPv6. If not set it will be determined
+        from host (defaults to AF_UNSPEC).
+
+        flags is a bitmask for getaddrinfo().
+
+        sock can optionally be specified in order to use a preexisting
+        socket object.
+
+        backlog is the maximum number of queued connections passed to
+        listen() (defaults to 100).
+
+        ssl can be set to an SSLContext to enable SSL over the
+        accepted connections.
+
+        reuse_address tells the kernel to reuse a local socket in
+        TIME_WAIT state, without waiting for its natural timeout to
+        expire. If not specified, it will automatically be set to True
+        on UNIX.
+
+        reuse_port tells the kernel to allow this endpoint to be bound to
+        the same port as other existing endpoints are bound to, so long as
+        they all set this flag when being created. This option is not
+        supported on Windows.
+        """
+        raise NotImplementedError
+
+    def create_unix_connection(self, protocol_factory, path, *,
+                               ssl=None, sock=None,
+                               server_hostname=None):
+        raise NotImplementedError
+
+    def create_unix_server(self, protocol_factory, path, *,
+                           sock=None, backlog=100, ssl=None):
+        """A coroutine which creates a UNIX Domain Socket server.
+
+        The return value is a Server object, which can be used to stop
+        the service.
+
+        path is a str, representing a file system path to bind the
+        server socket to.
+
+        sock can optionally be specified in order to use a preexisting
+        socket object.
+
+        backlog is the maximum number of queued connections passed to
+        listen() (defaults to 100).
+
+        ssl can be set to an SSLContext to enable SSL over the
+        accepted connections.
+        """
+        raise NotImplementedError
+
+    def create_datagram_endpoint(self, protocol_factory,
+                                 local_addr=None, remote_addr=None, *,
+                                 family=0, proto=0, flags=0,
+                                 reuse_address=None, reuse_port=None,
+                                 allow_broadcast=None, sock=None):
+        """A coroutine which creates a datagram endpoint.
+
+        This method will try to establish the endpoint in the background.
+        When successful, the coroutine returns a (transport, protocol) pair.
+
+        protocol_factory must be a callable returning a protocol instance.
+
+        The socket family will be AF_INET or socket.AF_INET6 depending on
+        host (or family if specified), and the socket type will be
+        SOCK_DGRAM.
+
+        reuse_address tells the kernel to reuse a local socket in
+        TIME_WAIT state, without waiting for its natural timeout to
+        expire. If not specified it will automatically be set to True on
+        UNIX.
+
+        reuse_port tells the kernel to allow this endpoint to be bound to
+        the same port as other existing endpoints are bound to, so long as
+        they all set this flag when being created. This option is not
+        supported on Windows and some UNIXes. If the
+        :py:data:`~socket.SO_REUSEPORT` constant is not defined then this
+        capability is unsupported.
+
+        allow_broadcast tells the kernel to allow this endpoint to send
+        messages to the broadcast address.
+
+        sock can optionally be specified in order to use a preexisting
+        socket object.
+        """
+        raise NotImplementedError
+
+    # Pipes and subprocesses.
+
+    def connect_read_pipe(self, protocol_factory, pipe):
+        """Register read pipe in event loop. Set the pipe to non-blocking mode.
+
+        protocol_factory should instantiate object with Protocol interface.
+        pipe is a file-like object.
+        Return pair (transport, protocol), where transport supports the
+        ReadTransport interface."""
+        # The reason to accept a file-like object instead of just a file
+        # descriptor is that we need to own the pipe and close it when the
+        # transport is finished. Passing f.fileno() could lead to
+        # complicated errors: the pipe transport might close the fd while
+        # f is still open, or vice versa.
+        raise NotImplementedError
+
+    def connect_write_pipe(self, protocol_factory, pipe):
+        """Register write pipe in event loop.
+
+        protocol_factory should instantiate object with BaseProtocol
+        interface.
+        pipe is a file-like object already switched to non-blocking mode.
+        Return pair (transport, protocol), where transport supports the
+        WriteTransport interface."""
+        # The reason to accept a file-like object instead of just a file
+        # descriptor is that we need to own the pipe and close it when the
+        # transport is finished. Passing f.fileno() could lead to
+        # complicated errors: the pipe transport might close the fd while
+        # f is still open, or vice versa.
+        raise NotImplementedError
+
+    def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
+                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                         **kwargs):
+        raise NotImplementedError
+
+    def subprocess_exec(self, protocol_factory, *args, stdin=subprocess.PIPE,
+                        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                        **kwargs):
+        raise NotImplementedError
+
+    # Ready-based callback registration methods.
+    # The add_*() methods return None.
+    # The remove_*() methods return True if something was removed,
+    # False if there was nothing to delete.
+
+    def add_reader(self, fd, callback, *args):
+        raise NotImplementedError
+
+    def remove_reader(self, fd):
+        raise NotImplementedError
+
+    def add_writer(self, fd, callback, *args):
+        raise NotImplementedError
+
+    def remove_writer(self, fd):
+        raise NotImplementedError
+
+    # Completion based I/O methods returning Futures.
+
+    def sock_recv(self, sock, nbytes):
+        raise NotImplementedError
+
+    def sock_sendall(self, sock, data):
+        raise NotImplementedError
+
+    def sock_connect(self, sock, address):
+        raise NotImplementedError
+
+    def sock_accept(self, sock):
+        raise NotImplementedError
+
+    # Signal handling.
+
+    def add_signal_handler(self, sig, callback, *args):
+        raise NotImplementedError
+
+    def remove_signal_handler(self, sig):
+        raise NotImplementedError
+
+    # Task factory.
+
+    def set_task_factory(self, factory):
+        raise NotImplementedError
+
+    def get_task_factory(self):
+        raise NotImplementedError
+
+    # Error handlers.
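+    # A handler installed via set_exception_handler() is expected to be a
+    # callable taking (loop, context); context is a dict shaped like the
+    # ones built in Handle._run() above (keys such as 'message' and
+    # 'exception').  Minimal illustrative sketch:
+    #
+    #     def on_error(loop, context):
+    #         print('asyncio error:', context['message'])
+    #
+    #     loop.set_exception_handler(on_error)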
+ + def get_exception_handler(self): + raise NotImplementedError + + def set_exception_handler(self, handler): + raise NotImplementedError + + def default_exception_handler(self, context): + raise NotImplementedError + + def call_exception_handler(self, context): + raise NotImplementedError + + # Debug flag management. + + def get_debug(self): + raise NotImplementedError + + def set_debug(self, enabled): + raise NotImplementedError + + +class AbstractEventLoopPolicy: + """Abstract policy for accessing the event loop.""" + + def get_event_loop(self): + """Get the event loop for the current context. + + Returns an event loop object implementing the BaseEventLoop interface, + or raises an exception in case no event loop has been set for the + current context and the current policy does not specify to create one. + + It should never return None.""" + raise NotImplementedError + + def set_event_loop(self, loop): + """Set the event loop for the current context to loop.""" + raise NotImplementedError + + def new_event_loop(self): + """Create and return a new event loop object according to this + policy's rules. If there's need to set this loop as the event loop for + the current context, set_event_loop must be called explicitly.""" + raise NotImplementedError + + # Child processes handling (Unix only). + + def get_child_watcher(self): + "Get the watcher for child processes." + raise NotImplementedError + + def set_child_watcher(self, watcher): + """Set the watcher for child processes.""" + raise NotImplementedError + + +class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy): + """Default policy implementation for accessing the event loop. + + In this policy, each thread has its own event loop. However, we + only automatically create an event loop by default for the main + thread; other threads by default have no event loop. + + Other policies may have different rules (e.g. a single global + event loop, or automatically creating an event loop per thread, or + using some other notion of context to which an event loop is + associated). + """ + + _loop_factory = None + + class _Local(threading.local): + _loop = None + _set_called = False + + def __init__(self): + self._local = self._Local() + + def get_event_loop(self): + """Get the event loop. + + This may be None or an instance of EventLoop. + """ + if (self._local._loop is None and + not self._local._set_called and + isinstance(threading.current_thread(), threading._MainThread)): + self.set_event_loop(self.new_event_loop()) + if self._local._loop is None: + raise RuntimeError('There is no current event loop in thread %r.' + % threading.current_thread().name) + return self._local._loop + + def set_event_loop(self, loop): + """Set the event loop.""" + self._local._set_called = True + assert loop is None or isinstance(loop, AbstractEventLoop) + self._local._loop = loop + + def new_event_loop(self): + """Create a new event loop. + + You must call set_event_loop() to make this the current event + loop. + """ + return self._loop_factory() + + +# Event loop policy. The policy itself is always global, even if the +# policy's rules say that there is an event loop per thread (or other +# notion of context). The default policy is installed by the first +# call to get_event_loop_policy(). +_event_loop_policy = None + +# Lock for protecting the on-the-fly creation of the event loop policy. +_lock = threading.Lock() + + +# A TLS for the running event loop, used by _get_running_loop. 
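+# Since _RunningLoop subclasses threading.local, each thread observes its
+# own _loop attribute; an event loop is expected to call
+# _set_running_loop(self) when it starts running and _set_running_loop(None)
+# when it stops, which keeps _get_running_loop() cheap and lock-free.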
+class _RunningLoop(threading.local): + _loop = None +_running_loop = _RunningLoop() + + +def _get_running_loop(): + """Return the running event loop or None. + + This is a low-level function intended to be used by event loops. + This function is thread-specific. + """ + return _running_loop._loop + + +def _set_running_loop(loop): + """Set the running event loop. + + This is a low-level function intended to be used by event loops. + This function is thread-specific. + """ + _running_loop._loop = loop + + +def _init_event_loop_policy(): + global _event_loop_policy + with _lock: + if _event_loop_policy is None: # pragma: no branch + from . import DefaultEventLoopPolicy + _event_loop_policy = DefaultEventLoopPolicy() + + +def get_event_loop_policy(): + """Get the current event loop policy.""" + if _event_loop_policy is None: + _init_event_loop_policy() + return _event_loop_policy + + +def set_event_loop_policy(policy): + """Set the current event loop policy. + + If policy is None, the default policy is restored.""" + global _event_loop_policy + assert policy is None or isinstance(policy, AbstractEventLoopPolicy) + _event_loop_policy = policy + + +def get_event_loop(): + """Return an asyncio event loop. + + When called from a coroutine or a callback (e.g. scheduled with call_soon + or similar API), this function will always return the running event loop. + + If there is no running event loop set, the function will return + the result of `get_event_loop_policy().get_event_loop()` call. + """ + current_loop = _get_running_loop() + if current_loop is not None: + return current_loop + return get_event_loop_policy().get_event_loop() + + +def set_event_loop(loop): + """Equivalent to calling get_event_loop_policy().set_event_loop(loop).""" + get_event_loop_policy().set_event_loop(loop) + + +def new_event_loop(): + """Equivalent to calling get_event_loop_policy().new_event_loop().""" + return get_event_loop_policy().new_event_loop() + + +def get_child_watcher(): + """Equivalent to calling get_event_loop_policy().get_child_watcher().""" + return get_event_loop_policy().get_child_watcher() + + +def set_child_watcher(watcher): + """Equivalent to calling + get_event_loop_policy().set_child_watcher(watcher).""" + return get_event_loop_policy().set_child_watcher(watcher) diff --git a/Lib/asyncio/futures.py b/Lib/asyncio/futures.py new file mode 100644 index 0000000000..d11d289307 --- /dev/null +++ b/Lib/asyncio/futures.py @@ -0,0 +1,443 @@ +"""A Future class similar to the one in PEP 3148.""" + +__all__ = ['CancelledError', 'TimeoutError', 'InvalidStateError', + 'Future', 'wrap_future', 'isfuture'] + +import concurrent.futures +import logging +import sys +import traceback + +from . import base_futures +from . import compat +from . import events + + +CancelledError = base_futures.CancelledError +InvalidStateError = base_futures.InvalidStateError +TimeoutError = base_futures.TimeoutError +isfuture = base_futures.isfuture + + +_PENDING = base_futures._PENDING +_CANCELLED = base_futures._CANCELLED +_FINISHED = base_futures._FINISHED + + +STACK_DEBUG = logging.DEBUG - 1 # heavy-duty debugging + + +class _TracebackLogger: + """Helper to log a traceback upon destruction if not cleared. + + This solves a nasty problem with Futures and Tasks that have an + exception set: if nobody asks for the exception, the exception is + never logged. This violates the Zen of Python: 'Errors should + never pass silently. Unless explicitly silenced.' 
+
+    However, we don't want to log the exception as soon as
+    set_exception() is called: if the calling code is written
+    properly, it will get the exception and handle it properly. But
+    we *do* want to log it if result() or exception() was never called
+    -- otherwise developers waste a lot of time wondering why their
+    buggy code fails silently.
+
+    An earlier attempt added a __del__() method to the Future class
+    itself, but this backfired because the presence of __del__()
+    prevents garbage collection from breaking cycles. A way out of
+    this catch-22 is to avoid having a __del__() method on the Future
+    class itself, but instead to have a reference to a helper object
+    with a __del__() method that logs the traceback, where we ensure
+    that the helper object doesn't participate in cycles, and only the
+    Future has a reference to it.
+
+    The helper object is added when set_exception() is called. When
+    the Future is collected, and the helper is present, the helper
+    object is also collected, and its __del__() method will log the
+    traceback. When the Future's result() or exception() method is
+    called (and a helper object is present), it removes the helper
+    object, after calling its clear() method to prevent it from
+    logging.
+
+    One downside is that we do a fair amount of work to extract the
+    traceback from the exception, even when it is never logged. It
+    would seem cheaper to just store the exception object, but that
+    references the traceback, which references stack frames, which may
+    reference the Future, which references the _TracebackLogger, and
+    then the _TracebackLogger would be included in a cycle, which is
+    what we're trying to avoid! As an optimization, we don't
+    immediately format the exception; we only do the work when
+    activate() is called, which is delayed until after all the
+    Future's callbacks have run. Since usually a Future has at least
+    one callback (typically set by 'yield from'), and usually that
+    callback extracts the exception, the formatting work is rarely
+    needed at all.
+
+    PS. I don't claim credit for this solution. I first heard of it
+    in a discussion about closing files when they are collected.
+    """
+
+    __slots__ = ('loop', 'source_traceback', 'exc', 'tb')
+
+    def __init__(self, future, exc):
+        self.loop = future._loop
+        self.source_traceback = future._source_traceback
+        self.exc = exc
+        self.tb = None
+
+    def activate(self):
+        exc = self.exc
+        if exc is not None:
+            self.exc = None
+            self.tb = traceback.format_exception(exc.__class__, exc,
+                                                 exc.__traceback__)
+
+    def clear(self):
+        self.exc = None
+        self.tb = None
+
+    def __del__(self):
+        if self.tb:
+            msg = 'Future/Task exception was never retrieved\n'
+            if self.source_traceback:
+                src = ''.join(traceback.format_list(self.source_traceback))
+                msg += 'Future/Task created at (most recent call last):\n'
+                msg += '%s\n' % src.rstrip()
+            msg += ''.join(self.tb).rstrip()
+            self.loop.call_exception_handler({'message': msg})
+
+
+class Future:
+    """This class is *almost* compatible with concurrent.futures.Future.
+
+    Differences:
+
+    - result() and exception() do not take a timeout argument and
+      raise an exception when the future isn't done yet.
+
+    - Callbacks registered with add_done_callback() are always called
+      via the event loop's call_soon_threadsafe().
+
+    - This class is not compatible with the wait() and as_completed()
+      methods in the concurrent.futures package.
+
+    (In Python 3.4 or later we may be able to unify the implementations.)
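+
+    (Awaiting a Future is done with 'yield from fut' -- or 'await fut' on
+    Python 3.5+ -- as implemented by __iter__()/__await__() below.)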
+ """ + + # Class variables serving as defaults for instance variables. + _state = _PENDING + _result = None + _exception = None + _loop = None + _source_traceback = None + + # This field is used for a dual purpose: + # - Its presence is a marker to declare that a class implements + # the Future protocol (i.e. is intended to be duck-type compatible). + # The value must also be not-None, to enable a subclass to declare + # that it is not compatible by setting this to None. + # - It is set by __iter__() below so that Task._step() can tell + # the difference between `yield from Future()` (correct) vs. + # `yield Future()` (incorrect). + _asyncio_future_blocking = False + + _log_traceback = False # Used for Python 3.4 and later + _tb_logger = None # Used for Python 3.3 only + + def __init__(self, *, loop=None): + """Initialize the future. + + The optional event_loop argument allows explicitly setting the event + loop object used by the future. If it's not provided, the future uses + the default event loop. + """ + if loop is None: + self._loop = events.get_event_loop() + else: + self._loop = loop + self._callbacks = [] + if self._loop.get_debug(): + self._source_traceback = traceback.extract_stack(sys._getframe(1)) + + _repr_info = base_futures._future_repr_info + + def __repr__(self): + return '<%s %s>' % (self.__class__.__name__, ' '.join(self._repr_info())) + + # On Python 3.3 and older, objects with a destructor part of a reference + # cycle are never destroyed. It's not more the case on Python 3.4 thanks + # to the PEP 442. + if compat.PY34: + def __del__(self): + if not self._log_traceback: + # set_exception() was not called, or result() or exception() + # has consumed the exception + return + exc = self._exception + context = { + 'message': ('%s exception was never retrieved' + % self.__class__.__name__), + 'exception': exc, + 'future': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + + def cancel(self): + """Cancel the future and schedule callbacks. + + If the future is already done or cancelled, return False. Otherwise, + change the future's state to cancelled, schedule the callbacks and + return True. + """ + if self._state != _PENDING: + return False + self._state = _CANCELLED + self._schedule_callbacks() + return True + + def _schedule_callbacks(self): + """Internal: Ask the event loop to call all callbacks. + + The callbacks are scheduled to be called as soon as possible. Also + clears the callback list. + """ + callbacks = self._callbacks[:] + if not callbacks: + return + + self._callbacks[:] = [] + for callback in callbacks: + self._loop.call_soon(callback, self) + + def cancelled(self): + """Return True if the future was cancelled.""" + return self._state == _CANCELLED + + # Don't implement running(); see http://bugs.python.org/issue18699 + + def done(self): + """Return True if the future is done. + + Done means either that a result / exception are available, or that the + future was cancelled. + """ + return self._state != _PENDING + + def result(self): + """Return the result this future represents. + + If the future has been cancelled, raises CancelledError. If the + future's result isn't yet available, raises InvalidStateError. If + the future is done and has an exception set, this exception is raised. 
+ """ + if self._state == _CANCELLED: + raise CancelledError + if self._state != _FINISHED: + raise InvalidStateError('Result is not ready.') + self._log_traceback = False + if self._tb_logger is not None: + self._tb_logger.clear() + self._tb_logger = None + if self._exception is not None: + raise self._exception + return self._result + + def exception(self): + """Return the exception that was set on this future. + + The exception (or None if no exception was set) is returned only if + the future is done. If the future has been cancelled, raises + CancelledError. If the future isn't done yet, raises + InvalidStateError. + """ + if self._state == _CANCELLED: + raise CancelledError + if self._state != _FINISHED: + raise InvalidStateError('Exception is not set.') + self._log_traceback = False + if self._tb_logger is not None: + self._tb_logger.clear() + self._tb_logger = None + return self._exception + + def add_done_callback(self, fn): + """Add a callback to be run when the future becomes done. + + The callback is called with a single argument - the future object. If + the future is already done when this is called, the callback is + scheduled with call_soon. + """ + if self._state != _PENDING: + self._loop.call_soon(fn, self) + else: + self._callbacks.append(fn) + + # New method not in PEP 3148. + + def remove_done_callback(self, fn): + """Remove all instances of a callback from the "call when done" list. + + Returns the number of callbacks removed. + """ + filtered_callbacks = [f for f in self._callbacks if f != fn] + removed_count = len(self._callbacks) - len(filtered_callbacks) + if removed_count: + self._callbacks[:] = filtered_callbacks + return removed_count + + # So-called internal methods (note: no set_running_or_notify_cancel()). + + def set_result(self, result): + """Mark the future done and set its result. + + If the future is already done when this method is called, raises + InvalidStateError. + """ + if self._state != _PENDING: + raise InvalidStateError('{}: {!r}'.format(self._state, self)) + self._result = result + self._state = _FINISHED + self._schedule_callbacks() + + def set_exception(self, exception): + """Mark the future done and set an exception. + + If the future is already done when this method is called, raises + InvalidStateError. + """ + if self._state != _PENDING: + raise InvalidStateError('{}: {!r}'.format(self._state, self)) + if isinstance(exception, type): + exception = exception() + if type(exception) is StopIteration: + raise TypeError("StopIteration interacts badly with generators " + "and cannot be raised into a Future") + self._exception = exception + self._state = _FINISHED + self._schedule_callbacks() + if compat.PY34: + self._log_traceback = True + else: + self._tb_logger = _TracebackLogger(self, exception) + # Arrange for the logger to be activated after all callbacks + # have had a chance to call result() or exception(). + self._loop.call_soon(self._tb_logger.activate) + + def __iter__(self): + if not self.done(): + self._asyncio_future_blocking = True + yield self # This tells Task to wait for completion. + assert self.done(), "yield from wasn't used with future" + return self.result() # May raise too. + + if compat.PY35: + __await__ = __iter__ # make compatible with 'await' expression + + +# Needed for testing purposes. 
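+# (The pure Python implementation stays importable under this alias even
+# when the C-accelerated _asyncio.Future replaces the public Future name
+# at the bottom of this module.)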
+_PyFuture = Future + + +def _set_result_unless_cancelled(fut, result): + """Helper setting the result only if the future was not cancelled.""" + if fut.cancelled(): + return + fut.set_result(result) + + +def _set_concurrent_future_state(concurrent, source): + """Copy state from a future to a concurrent.futures.Future.""" + assert source.done() + if source.cancelled(): + concurrent.cancel() + if not concurrent.set_running_or_notify_cancel(): + return + exception = source.exception() + if exception is not None: + concurrent.set_exception(exception) + else: + result = source.result() + concurrent.set_result(result) + + +def _copy_future_state(source, dest): + """Internal helper to copy state from another Future. + + The other Future may be a concurrent.futures.Future. + """ + assert source.done() + if dest.cancelled(): + return + assert not dest.done() + if source.cancelled(): + dest.cancel() + else: + exception = source.exception() + if exception is not None: + dest.set_exception(exception) + else: + result = source.result() + dest.set_result(result) + + +def _chain_future(source, destination): + """Chain two futures so that when one completes, so does the other. + + The result (or exception) of source will be copied to destination. + If destination is cancelled, source gets cancelled too. + Compatible with both asyncio.Future and concurrent.futures.Future. + """ + if not isfuture(source) and not isinstance(source, + concurrent.futures.Future): + raise TypeError('A future is required for source argument') + if not isfuture(destination) and not isinstance(destination, + concurrent.futures.Future): + raise TypeError('A future is required for destination argument') + source_loop = source._loop if isfuture(source) else None + dest_loop = destination._loop if isfuture(destination) else None + + def _set_state(future, other): + if isfuture(future): + _copy_future_state(other, future) + else: + _set_concurrent_future_state(future, other) + + def _call_check_cancel(destination): + if destination.cancelled(): + if source_loop is None or source_loop is dest_loop: + source.cancel() + else: + source_loop.call_soon_threadsafe(source.cancel) + + def _call_set_state(source): + if dest_loop is None or dest_loop is source_loop: + _set_state(destination, source) + else: + dest_loop.call_soon_threadsafe(_set_state, destination, source) + + destination.add_done_callback(_call_check_cancel) + source.add_done_callback(_call_set_state) + + +def wrap_future(future, *, loop=None): + """Wrap concurrent.futures.Future object.""" + if isfuture(future): + return future + assert isinstance(future, concurrent.futures.Future), \ + 'concurrent.futures.Future is expected, got {!r}'.format(future) + if loop is None: + loop = events.get_event_loop() + new_future = loop.create_future() + _chain_future(future, new_future) + return new_future + + +try: + import _asyncio +except ImportError: + pass +else: + # _CFuture is needed for tests. + Future = _CFuture = _asyncio.Future diff --git a/Lib/asyncio/locks.py b/Lib/asyncio/locks.py new file mode 100644 index 0000000000..deefc938ec --- /dev/null +++ b/Lib/asyncio/locks.py @@ -0,0 +1,478 @@ +"""Synchronization primitives.""" + +__all__ = ['Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore'] + +import collections + +from . import compat +from . import events +from . import futures +from .coroutines import coroutine + + +class _ContextManager: + """Context manager. 
+ + This enables the following idiom for acquiring and releasing a + lock around a block: + + with (yield from lock): + + + while failing loudly when accidentally using: + + with lock: + + """ + + def __init__(self, lock): + self._lock = lock + + def __enter__(self): + # We have no use for the "as ..." clause in the with + # statement for locks. + return None + + def __exit__(self, *args): + try: + self._lock.release() + finally: + self._lock = None # Crudely prevent reuse. + + +class _ContextManagerMixin: + def __enter__(self): + raise RuntimeError( + '"yield from" should be used as context manager expression') + + def __exit__(self, *args): + # This must exist because __enter__ exists, even though that + # always raises; that's how the with-statement works. + pass + + @coroutine + def __iter__(self): + # This is not a coroutine. It is meant to enable the idiom: + # + # with (yield from lock): + # + # + # as an alternative to: + # + # yield from lock.acquire() + # try: + # + # finally: + # lock.release() + yield from self.acquire() + return _ContextManager(self) + + if compat.PY35: + + def __await__(self): + # To make "with await lock" work. + yield from self.acquire() + return _ContextManager(self) + + @coroutine + def __aenter__(self): + yield from self.acquire() + # We have no use for the "as ..." clause in the with + # statement for locks. + return None + + @coroutine + def __aexit__(self, exc_type, exc, tb): + self.release() + + +class Lock(_ContextManagerMixin): + """Primitive lock objects. + + A primitive lock is a synchronization primitive that is not owned + by a particular coroutine when locked. A primitive lock is in one + of two states, 'locked' or 'unlocked'. + + It is created in the unlocked state. It has two basic methods, + acquire() and release(). When the state is unlocked, acquire() + changes the state to locked and returns immediately. When the + state is locked, acquire() blocks until a call to release() in + another coroutine changes it to unlocked, then the acquire() call + resets it to locked and returns. The release() method should only + be called in the locked state; it changes the state to unlocked + and returns immediately. If an attempt is made to release an + unlocked lock, a RuntimeError will be raised. + + When more than one coroutine is blocked in acquire() waiting for + the state to turn to unlocked, only one coroutine proceeds when a + release() call resets the state to unlocked; first coroutine which + is blocked in acquire() is being processed. + + acquire() is a coroutine and should be called with 'yield from'. + + Locks also support the context management protocol. '(yield from lock)' + should be used as the context manager expression. + + Usage: + + lock = Lock() + ... + yield from lock + try: + ... + finally: + lock.release() + + Context manager usage: + + lock = Lock() + ... + with (yield from lock): + ... + + Lock objects can be tested for locking state: + + if not lock.locked(): + yield from lock + else: + # lock is acquired + ... 
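+
+    On Python 3.5+, since _ContextManagerMixin also defines __aenter__ and
+    __aexit__ above, a lock additionally supports:
+
+        async with lock:
+            ...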
+ + """ + + def __init__(self, *, loop=None): + self._waiters = collections.deque() + self._locked = False + if loop is not None: + self._loop = loop + else: + self._loop = events.get_event_loop() + + def __repr__(self): + res = super().__repr__() + extra = 'locked' if self._locked else 'unlocked' + if self._waiters: + extra = '{},waiters:{}'.format(extra, len(self._waiters)) + return '<{} [{}]>'.format(res[1:-1], extra) + + def locked(self): + """Return True if lock is acquired.""" + return self._locked + + @coroutine + def acquire(self): + """Acquire a lock. + + This method blocks until the lock is unlocked, then sets it to + locked and returns True. + """ + if not self._locked and all(w.cancelled() for w in self._waiters): + self._locked = True + return True + + fut = self._loop.create_future() + self._waiters.append(fut) + try: + yield from fut + self._locked = True + return True + finally: + self._waiters.remove(fut) + + def release(self): + """Release a lock. + + When the lock is locked, reset it to unlocked, and return. + If any other coroutines are blocked waiting for the lock to become + unlocked, allow exactly one of them to proceed. + + When invoked on an unlocked lock, a RuntimeError is raised. + + There is no return value. + """ + if self._locked: + self._locked = False + # Wake up the first waiter who isn't cancelled. + for fut in self._waiters: + if not fut.done(): + fut.set_result(True) + break + else: + raise RuntimeError('Lock is not acquired.') + + +class Event: + """Asynchronous equivalent to threading.Event. + + Class implementing event objects. An event manages a flag that can be set + to true with the set() method and reset to false with the clear() method. + The wait() method blocks until the flag is true. The flag is initially + false. + """ + + def __init__(self, *, loop=None): + self._waiters = collections.deque() + self._value = False + if loop is not None: + self._loop = loop + else: + self._loop = events.get_event_loop() + + def __repr__(self): + res = super().__repr__() + extra = 'set' if self._value else 'unset' + if self._waiters: + extra = '{},waiters:{}'.format(extra, len(self._waiters)) + return '<{} [{}]>'.format(res[1:-1], extra) + + def is_set(self): + """Return True if and only if the internal flag is true.""" + return self._value + + def set(self): + """Set the internal flag to true. All coroutines waiting for it to + become true are awakened. Coroutine that call wait() once the flag is + true will not block at all. + """ + if not self._value: + self._value = True + + for fut in self._waiters: + if not fut.done(): + fut.set_result(True) + + def clear(self): + """Reset the internal flag to false. Subsequently, coroutines calling + wait() will block until set() is called to set the internal flag + to true again.""" + self._value = False + + @coroutine + def wait(self): + """Block until the internal flag is true. + + If the internal flag is true on entry, return True + immediately. Otherwise, block until another coroutine calls + set() to set the flag to true, then return True. + """ + if self._value: + return True + + fut = self._loop.create_future() + self._waiters.append(fut) + try: + yield from fut + return True + finally: + self._waiters.remove(fut) + + +class Condition(_ContextManagerMixin): + """Asynchronous equivalent to threading.Condition. + + This class implements condition variable objects. A condition variable + allows one or more coroutines to wait until they are notified by another + coroutine. 
+ + A new Lock object is created and used as the underlying lock. + """ + + def __init__(self, lock=None, *, loop=None): + if loop is not None: + self._loop = loop + else: + self._loop = events.get_event_loop() + + if lock is None: + lock = Lock(loop=self._loop) + elif lock._loop is not self._loop: + raise ValueError("loop argument must agree with lock") + + self._lock = lock + # Export the lock's locked(), acquire() and release() methods. + self.locked = lock.locked + self.acquire = lock.acquire + self.release = lock.release + + self._waiters = collections.deque() + + def __repr__(self): + res = super().__repr__() + extra = 'locked' if self.locked() else 'unlocked' + if self._waiters: + extra = '{},waiters:{}'.format(extra, len(self._waiters)) + return '<{} [{}]>'.format(res[1:-1], extra) + + @coroutine + def wait(self): + """Wait until notified. + + If the calling coroutine has not acquired the lock when this + method is called, a RuntimeError is raised. + + This method releases the underlying lock, and then blocks + until it is awakened by a notify() or notify_all() call for + the same condition variable in another coroutine. Once + awakened, it re-acquires the lock and returns True. + """ + if not self.locked(): + raise RuntimeError('cannot wait on un-acquired lock') + + self.release() + try: + fut = self._loop.create_future() + self._waiters.append(fut) + try: + yield from fut + return True + finally: + self._waiters.remove(fut) + + finally: + # Must reacquire lock even if wait is cancelled + while True: + try: + yield from self.acquire() + break + except futures.CancelledError: + pass + + @coroutine + def wait_for(self, predicate): + """Wait until a predicate becomes true. + + The predicate should be a callable which result will be + interpreted as a boolean value. The final predicate value is + the return value. + """ + result = predicate() + while not result: + yield from self.wait() + result = predicate() + return result + + def notify(self, n=1): + """By default, wake up one coroutine waiting on this condition, if any. + If the calling coroutine has not acquired the lock when this method + is called, a RuntimeError is raised. + + This method wakes up at most n of the coroutines waiting for the + condition variable; it is a no-op if no coroutines are waiting. + + Note: an awakened coroutine does not actually return from its + wait() call until it can reacquire the lock. Since notify() does + not release the lock, its caller should. + """ + if not self.locked(): + raise RuntimeError('cannot notify on un-acquired lock') + + idx = 0 + for fut in self._waiters: + if idx >= n: + break + + if not fut.done(): + idx += 1 + fut.set_result(False) + + def notify_all(self): + """Wake up all threads waiting on this condition. This method acts + like notify(), but wakes up all waiting threads instead of one. If the + calling thread has not acquired the lock when this method is called, + a RuntimeError is raised. + """ + self.notify(len(self._waiters)) + + +class Semaphore(_ContextManagerMixin): + """A Semaphore implementation. + + A semaphore manages an internal counter which is decremented by each + acquire() call and incremented by each release() call. The counter + can never go below zero; when acquire() finds that it is zero, it blocks, + waiting until some other thread calls release(). + + Semaphores also support the context management protocol. + + The optional argument gives the initial value for the internal + counter; it defaults to 1. 
If the value given is less than 0, + ValueError is raised. + """ + + def __init__(self, value=1, *, loop=None): + if value < 0: + raise ValueError("Semaphore initial value must be >= 0") + self._value = value + self._waiters = collections.deque() + if loop is not None: + self._loop = loop + else: + self._loop = events.get_event_loop() + + def __repr__(self): + res = super().__repr__() + extra = 'locked' if self.locked() else 'unlocked,value:{}'.format( + self._value) + if self._waiters: + extra = '{},waiters:{}'.format(extra, len(self._waiters)) + return '<{} [{}]>'.format(res[1:-1], extra) + + def _wake_up_next(self): + while self._waiters: + waiter = self._waiters.popleft() + if not waiter.done(): + waiter.set_result(None) + return + + def locked(self): + """Returns True if semaphore can not be acquired immediately.""" + return self._value == 0 + + @coroutine + def acquire(self): + """Acquire a semaphore. + + If the internal counter is larger than zero on entry, + decrement it by one and return True immediately. If it is + zero on entry, block, waiting until some other coroutine has + called release() to make it larger than 0, and then return + True. + """ + while self._value <= 0: + fut = self._loop.create_future() + self._waiters.append(fut) + try: + yield from fut + except: + # See the similar code in Queue.get. + fut.cancel() + if self._value > 0 and not fut.cancelled(): + self._wake_up_next() + raise + self._value -= 1 + return True + + def release(self): + """Release a semaphore, incrementing the internal counter by one. + When it was zero on entry and another coroutine is waiting for it to + become larger than zero again, wake up that coroutine. + """ + self._value += 1 + self._wake_up_next() + + +class BoundedSemaphore(Semaphore): + """A bounded semaphore implementation. + + This raises ValueError in release() if it would increase the value + above the initial value. + """ + + def __init__(self, value=1, *, loop=None): + self._bound_value = value + super().__init__(value, loop=loop) + + def release(self): + if self._value >= self._bound_value: + raise ValueError('BoundedSemaphore released too many times') + super().release() diff --git a/Lib/asyncio/log.py b/Lib/asyncio/log.py new file mode 100644 index 0000000000..23a7074afb --- /dev/null +++ b/Lib/asyncio/log.py @@ -0,0 +1,7 @@ +"""Logging configuration.""" + +import logging + + +# Name the logger after the package. +logger = logging.getLogger(__package__) diff --git a/Lib/asyncio/proactor_events.py b/Lib/asyncio/proactor_events.py new file mode 100644 index 0000000000..ff12877fae --- /dev/null +++ b/Lib/asyncio/proactor_events.py @@ -0,0 +1,550 @@ +"""Event loop using a proactor and related classes. + +A proactor is a "notify-on-completion" multiplexer. Currently a +proactor is only implemented on Windows with IOCP. +""" + +__all__ = ['BaseProactorEventLoop'] + +import socket +import warnings + +from . import base_events +from . import compat +from . import constants +from . import futures +from . import sslproto +from . import transports +from .log import logger + + +class _ProactorBasePipeTransport(transports._FlowControlMixin, + transports.BaseTransport): + """Base class for pipe and socket transports.""" + + def __init__(self, loop, sock, protocol, waiter=None, + extra=None, server=None): + super().__init__(extra, loop) + self._set_extra(sock) + self._sock = sock + self._protocol = protocol + self._server = server + self._buffer = None # None or bytearray. 
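+        # The buffer also encodes the write-path state (see the state
+        # notes in _ProactorBaseWritePipeTransport.write() below): None
+        # while idle or while a single send() is in flight, a bytearray
+        # once writes have backed up behind a pending send.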
+ self._read_fut = None + self._write_fut = None + self._pending_write = 0 + self._conn_lost = 0 + self._closing = False # Set when close() called. + self._eof_written = False + if self._server is not None: + self._server._attach() + self._loop.call_soon(self._protocol.connection_made, self) + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) + + def __repr__(self): + info = [self.__class__.__name__] + if self._sock is None: + info.append('closed') + elif self._closing: + info.append('closing') + if self._sock is not None: + info.append('fd=%s' % self._sock.fileno()) + if self._read_fut is not None: + info.append('read=%s' % self._read_fut) + if self._write_fut is not None: + info.append("write=%r" % self._write_fut) + if self._buffer: + bufsize = len(self._buffer) + info.append('write_bufsize=%s' % bufsize) + if self._eof_written: + info.append('EOF written') + return '<%s>' % ' '.join(info) + + def _set_extra(self, sock): + self._extra['pipe'] = sock + + def set_protocol(self, protocol): + self._protocol = protocol + + def get_protocol(self): + return self._protocol + + def is_closing(self): + return self._closing + + def close(self): + if self._closing: + return + self._closing = True + self._conn_lost += 1 + if not self._buffer and self._write_fut is None: + self._loop.call_soon(self._call_connection_lost, None) + if self._read_fut is not None: + self._read_fut.cancel() + self._read_fut = None + + # On Python 3.3 and older, objects with a destructor part of a reference + # cycle are never destroyed. It's not more the case on Python 3.4 thanks + # to the PEP 442. + if compat.PY34: + def __del__(self): + if self._sock is not None: + warnings.warn("unclosed transport %r" % self, ResourceWarning, + source=self) + self.close() + + def _fatal_error(self, exc, message='Fatal error on pipe transport'): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + self._force_close(exc) + + def _force_close(self, exc): + if self._closing: + return + self._closing = True + self._conn_lost += 1 + if self._write_fut: + self._write_fut.cancel() + self._write_fut = None + if self._read_fut: + self._read_fut.cancel() + self._read_fut = None + self._pending_write = 0 + self._buffer = None + self._loop.call_soon(self._call_connection_lost, exc) + + def _call_connection_lost(self, exc): + try: + self._protocol.connection_lost(exc) + finally: + # XXX If there is a pending overlapped read on the other + # end then it may fail with ERROR_NETNAME_DELETED if we + # just close our end. First calling shutdown() seems to + # cure it, but maybe using DisconnectEx() would be better. 
+ if hasattr(self._sock, 'shutdown'): + self._sock.shutdown(socket.SHUT_RDWR) + self._sock.close() + self._sock = None + server = self._server + if server is not None: + server._detach() + self._server = None + + def get_write_buffer_size(self): + size = self._pending_write + if self._buffer is not None: + size += len(self._buffer) + return size + + +class _ProactorReadPipeTransport(_ProactorBasePipeTransport, + transports.ReadTransport): + """Transport for read pipes.""" + + def __init__(self, loop, sock, protocol, waiter=None, + extra=None, server=None): + super().__init__(loop, sock, protocol, waiter, extra, server) + self._paused = False + self._loop.call_soon(self._loop_reading) + + def pause_reading(self): + if self._closing: + raise RuntimeError('Cannot pause_reading() when closing') + if self._paused: + raise RuntimeError('Already paused') + self._paused = True + if self._loop.get_debug(): + logger.debug("%r pauses reading", self) + + def resume_reading(self): + if not self._paused: + raise RuntimeError('Not paused') + self._paused = False + if self._closing: + return + self._loop.call_soon(self._loop_reading, self._read_fut) + if self._loop.get_debug(): + logger.debug("%r resumes reading", self) + + def _loop_reading(self, fut=None): + if self._paused: + return + data = None + + try: + if fut is not None: + assert self._read_fut is fut or (self._read_fut is None and + self._closing) + self._read_fut = None + data = fut.result() # deliver data later in "finally" clause + + if self._closing: + # since close() has been called we ignore any read data + data = None + return + + if data == b'': + # we got end-of-file so no need to reschedule a new read + return + + # reschedule a new read + self._read_fut = self._loop._proactor.recv(self._sock, 4096) + except ConnectionAbortedError as exc: + if not self._closing: + self._fatal_error(exc, 'Fatal read error on pipe transport') + elif self._loop.get_debug(): + logger.debug("Read error on pipe transport while closing", + exc_info=True) + except ConnectionResetError as exc: + self._force_close(exc) + except OSError as exc: + self._fatal_error(exc, 'Fatal read error on pipe transport') + except futures.CancelledError: + if not self._closing: + raise + else: + self._read_fut.add_done_callback(self._loop_reading) + finally: + if data: + self._protocol.data_received(data) + elif data is not None: + if self._loop.get_debug(): + logger.debug("%r received EOF", self) + keep_open = self._protocol.eof_received() + if not keep_open: + self.close() + + +class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport, + transports.WriteTransport): + """Transport for write pipes.""" + + def write(self, data): + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError('data argument must be a bytes-like object, ' + 'not %r' % type(data).__name__) + if self._eof_written: + raise RuntimeError('write_eof() already called') + + if not data: + return + + if self._conn_lost: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + # Observable states: + # 1. IDLE: _write_fut and _buffer both None + # 2. WRITING: _write_fut set; _buffer None + # 3. BACKED UP: _write_fut set; _buffer a bytearray + # We always copy the data, so the caller can't modify it + # while we're still waiting for the I/O to happen. + if self._write_fut is None: # IDLE -> WRITING + assert self._buffer is None + # Pass a copy, except if it's already immutable.
+ self._loop_writing(data=bytes(data)) + elif not self._buffer: # WRITING -> BACKED UP + # Make a mutable copy which we can extend. + self._buffer = bytearray(data) + self._maybe_pause_protocol() + else: # BACKED UP + # Append to buffer (also copies). + self._buffer.extend(data) + self._maybe_pause_protocol() + + def _loop_writing(self, f=None, data=None): + try: + assert f is self._write_fut + self._write_fut = None + self._pending_write = 0 + if f: + f.result() + if data is None: + data = self._buffer + self._buffer = None + if not data: + if self._closing: + self._loop.call_soon(self._call_connection_lost, None) + if self._eof_written: + self._sock.shutdown(socket.SHUT_WR) + # Now that we've reduced the buffer size, tell the + # protocol to resume writing if it was paused. Note that + # we do this last since the callback is called immediately + # and it may add more data to the buffer (even causing the + # protocol to be paused again). + self._maybe_resume_protocol() + else: + self._write_fut = self._loop._proactor.send(self._sock, data) + if not self._write_fut.done(): + assert self._pending_write == 0 + self._pending_write = len(data) + self._write_fut.add_done_callback(self._loop_writing) + self._maybe_pause_protocol() + else: + self._write_fut.add_done_callback(self._loop_writing) + except ConnectionResetError as exc: + self._force_close(exc) + except OSError as exc: + self._fatal_error(exc, 'Fatal write error on pipe transport') + + def can_write_eof(self): + return True + + def write_eof(self): + self.close() + + def abort(self): + self._force_close(None) + + +class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport): + def __init__(self, *args, **kw): + super().__init__(*args, **kw) + self._read_fut = self._loop._proactor.recv(self._sock, 16) + self._read_fut.add_done_callback(self._pipe_closed) + + def _pipe_closed(self, fut): + if fut.cancelled(): + # the transport has been closed + return + assert fut.result() == b'' + if self._closing: + assert self._read_fut is None + return + assert fut is self._read_fut, (fut, self._read_fut) + self._read_fut = None + if self._write_fut is not None: + self._force_close(BrokenPipeError()) + else: + self.close() + + +class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport, + _ProactorBaseWritePipeTransport, + transports.Transport): + """Transport for duplex pipes.""" + + def can_write_eof(self): + return False + + def write_eof(self): + raise NotImplementedError + + +class _ProactorSocketTransport(_ProactorReadPipeTransport, + _ProactorBaseWritePipeTransport, + transports.Transport): + """Transport for connected sockets.""" + + def _set_extra(self, sock): + self._extra['socket'] = sock + try: + self._extra['sockname'] = sock.getsockname() + except (socket.error, AttributeError): + if self._loop.get_debug(): + logger.warning("getsockname() failed on %r", + sock, exc_info=True) + if 'peername' not in self._extra: + try: + self._extra['peername'] = sock.getpeername() + except (socket.error, AttributeError): + if self._loop.get_debug(): + logger.warning("getpeername() failed on %r", + sock, exc_info=True) + + def can_write_eof(self): + return True + + def write_eof(self): + if self._closing or self._eof_written: + return + self._eof_written = True + if self._write_fut is None: + self._sock.shutdown(socket.SHUT_WR) + + +class BaseProactorEventLoop(base_events.BaseEventLoop): + + def __init__(self, proactor): + super().__init__() + logger.debug('Using proactor: %s', proactor.__class__.__name__) + self._proactor = proactor + 
self._selector = proactor # convenient alias + self._self_reading_future = None + self._accept_futures = {} # socket file descriptor => Future + proactor.set_loop(self) + self._make_self_pipe() + + def _make_socket_transport(self, sock, protocol, waiter=None, + extra=None, server=None): + return _ProactorSocketTransport(self, sock, protocol, waiter, + extra, server) + + def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None, + *, server_side=False, server_hostname=None, + extra=None, server=None): + if not sslproto._is_sslproto_available(): + raise NotImplementedError("Proactor event loop requires Python 3.5" + " or newer (ssl.MemoryBIO) to support " + "SSL") + + ssl_protocol = sslproto.SSLProtocol(self, protocol, sslcontext, waiter, + server_side, server_hostname) + _ProactorSocketTransport(self, rawsock, ssl_protocol, + extra=extra, server=server) + return ssl_protocol._app_transport + + def _make_duplex_pipe_transport(self, sock, protocol, waiter=None, + extra=None): + return _ProactorDuplexPipeTransport(self, + sock, protocol, waiter, extra) + + def _make_read_pipe_transport(self, sock, protocol, waiter=None, + extra=None): + return _ProactorReadPipeTransport(self, sock, protocol, waiter, extra) + + def _make_write_pipe_transport(self, sock, protocol, waiter=None, + extra=None): + # We want connection_lost() to be called when other end closes + return _ProactorWritePipeTransport(self, + sock, protocol, waiter, extra) + + def close(self): + if self.is_running(): + raise RuntimeError("Cannot close a running event loop") + if self.is_closed(): + return + + # Call these methods before closing the event loop (before calling + # BaseEventLoop.close), because they can schedule callbacks with + # call_soon(), which is forbidden when the event loop is closed. + self._stop_accept_futures() + self._close_self_pipe() + self._proactor.close() + self._proactor = None + self._selector = None + + # Close the event loop + super().close() + + def sock_recv(self, sock, n): + return self._proactor.recv(sock, n) + + def sock_sendall(self, sock, data): + return self._proactor.send(sock, data) + + def sock_connect(self, sock, address): + return self._proactor.connect(sock, address) + + def sock_accept(self, sock): + return self._proactor.accept(sock) + + def _socketpair(self): + raise NotImplementedError + + def _close_self_pipe(self): + if self._self_reading_future is not None: + self._self_reading_future.cancel() + self._self_reading_future = None + self._ssock.close() + self._ssock = None + self._csock.close() + self._csock = None + self._internal_fds -= 1 + + def _make_self_pipe(self): + # A self-socket, really. 
:-) + self._ssock, self._csock = self._socketpair() + self._ssock.setblocking(False) + self._csock.setblocking(False) + self._internal_fds += 1 + self.call_soon(self._loop_self_reading) + + def _loop_self_reading(self, f=None): + try: + if f is not None: + f.result() # may raise + f = self._proactor.recv(self._ssock, 4096) + except futures.CancelledError: + # _close_self_pipe() has been called, stop waiting for data + return + except Exception as exc: + self.call_exception_handler({ + 'message': 'Error on reading from the event loop self pipe', + 'exception': exc, + 'loop': self, + }) + else: + self._self_reading_future = f + f.add_done_callback(self._loop_self_reading) + + def _write_to_self(self): + self._csock.send(b'\0') + + def _start_serving(self, protocol_factory, sock, + sslcontext=None, server=None, backlog=100): + + def loop(f=None): + try: + if f is not None: + conn, addr = f.result() + if self._debug: + logger.debug("%r got a new connection from %r: %r", + server, addr, conn) + protocol = protocol_factory() + if sslcontext is not None: + self._make_ssl_transport( + conn, protocol, sslcontext, server_side=True, + extra={'peername': addr}, server=server) + else: + self._make_socket_transport( + conn, protocol, + extra={'peername': addr}, server=server) + if self.is_closed(): + return + f = self._proactor.accept(sock) + except OSError as exc: + if sock.fileno() != -1: + self.call_exception_handler({ + 'message': 'Accept failed on a socket', + 'exception': exc, + 'socket': sock, + }) + sock.close() + elif self._debug: + logger.debug("Accept failed on socket %r", + sock, exc_info=True) + except futures.CancelledError: + sock.close() + else: + self._accept_futures[sock.fileno()] = f + f.add_done_callback(loop) + + self.call_soon(loop) + + def _process_events(self, event_list): + # Events are processed in the IocpProactor._poll() method + pass + + def _stop_accept_futures(self): + for future in self._accept_futures.values(): + future.cancel() + self._accept_futures.clear() + + def _stop_serving(self, sock): + self._stop_accept_futures() + self._proactor._stop_serving(sock) + sock.close() diff --git a/Lib/asyncio/protocols.py b/Lib/asyncio/protocols.py new file mode 100644 index 0000000000..80fcac9a82 --- /dev/null +++ b/Lib/asyncio/protocols.py @@ -0,0 +1,134 @@ +"""Abstract Protocol class.""" + +__all__ = ['BaseProtocol', 'Protocol', 'DatagramProtocol', + 'SubprocessProtocol'] + + +class BaseProtocol: + """Common base class for protocol interfaces. + + Usually, users implement protocols derived from BaseProtocol, + such as Protocol or ProcessProtocol. + + The only case where BaseProtocol should be implemented directly is + a write-only transport, such as a write pipe. + """ + + def connection_made(self, transport): + """Called when a connection is made. + + The argument is the transport representing the pipe connection. + To receive data, wait for data_received() calls. + When the connection is closed, connection_lost() is called. + """ + + def connection_lost(self, exc): + """Called when the connection is lost or closed. + + The argument is an exception object or None (the latter + meaning a regular EOF is received or the connection was + aborted or closed). + """ + + def pause_writing(self): + """Called when the transport's buffer goes over the high-water mark.
+ + Pause and resume calls are paired -- pause_writing() is called + once when the buffer goes strictly over the high-water mark + (even if subsequent writes increase the buffer size even + more), and eventually resume_writing() is called once when the + buffer size reaches the low-water mark. + + Note that if the buffer size equals the high-water mark, + pause_writing() is not called -- it must go strictly over. + Conversely, resume_writing() is called when the buffer size is + equal to or lower than the low-water mark. These end conditions + are important to ensure that things go as expected when either + mark is zero. + + NOTE: This is the only Protocol callback that is not called + through EventLoop.call_soon() -- if it were, it would have no + effect when it's most needed (when the app keeps writing + without yielding until pause_writing() is called). + """ + + def resume_writing(self): + """Called when the transport's buffer drains below the low-water mark. + + See pause_writing() for details. + """ + + +class Protocol(BaseProtocol): + """Interface for stream protocol. + + The user should implement this interface. They can inherit from + this class but don't need to. The implementations here do + nothing (they don't raise exceptions). + + When the user wants to request a transport, they pass a protocol + factory to a utility function (e.g., EventLoop.create_connection()). + + When the connection is made successfully, connection_made() is + called with a suitable transport object. Then data_received() + will be called 0 or more times with data (bytes) received from the + transport; finally, connection_lost() will be called exactly once + with either an exception object or None as an argument. + + State machine of calls: + + start -> CM [-> DR*] [-> ER?] -> CL -> end + + * CM: connection_made() + * DR: data_received() + * ER: eof_received() + * CL: connection_lost() + """ + + def data_received(self, data): + """Called when some data is received. + + The argument is a bytes object. + """ + + def eof_received(self): + """Called when the other end calls write_eof() or equivalent. + + If this returns a false value (including None), the transport + will close itself. If it returns a true value, closing the + transport is up to the protocol. + """ + + +class DatagramProtocol(BaseProtocol): + """Interface for datagram protocol.""" + + def datagram_received(self, data, addr): + """Called when some datagram is received.""" + + def error_received(self, exc): + """Called when a send or receive operation raises an OSError. + + (Other than BlockingIOError or InterruptedError.) + """ + + +class SubprocessProtocol(BaseProtocol): + """Interface for protocol for subprocess calls.""" + + def pipe_data_received(self, fd, data): + """Called when the subprocess writes data into stdout/stderr pipe. + + fd is int file descriptor. + data is bytes object. + """ + + def pipe_connection_lost(self, fd, exc): + """Called when a file descriptor associated with the child process is + closed. + + fd is the int file descriptor that was closed. + """ + + def process_exited(self): + """Called when subprocess has exited.""" diff --git a/Lib/asyncio/queues.py b/Lib/asyncio/queues.py new file mode 100644 index 0000000000..2d38972c0d --- /dev/null +++ b/Lib/asyncio/queues.py @@ -0,0 +1,253 @@ +"""Queues""" + +__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty'] + +import collections +import heapq + +from . import compat +from . import events +from . 
import locks +from .coroutines import coroutine + + +class QueueEmpty(Exception): + """Exception raised when Queue.get_nowait() is called on a Queue object + which is empty. + """ + pass + + +class QueueFull(Exception): + """Exception raised when the Queue.put_nowait() method is called on a Queue + object which is full. + """ + pass + + +class Queue: + """A queue, useful for coordinating producer and consumer coroutines. + + If maxsize is less than or equal to zero, the queue size is infinite. If it + is an integer greater than 0, then "yield from put()" will block when the + queue reaches maxsize, until an item is removed by get(). + + Unlike the standard library Queue, you can reliably know this Queue's size + with qsize(), since your single-threaded asyncio application won't be + interrupted between calling qsize() and doing an operation on the Queue. + """ + + def __init__(self, maxsize=0, *, loop=None): + if loop is None: + self._loop = events.get_event_loop() + else: + self._loop = loop + self._maxsize = maxsize + + # Futures. + self._getters = collections.deque() + # Futures. + self._putters = collections.deque() + self._unfinished_tasks = 0 + self._finished = locks.Event(loop=self._loop) + self._finished.set() + self._init(maxsize) + + # These three are overridable in subclasses. + + def _init(self, maxsize): + self._queue = collections.deque() + + def _get(self): + return self._queue.popleft() + + def _put(self, item): + self._queue.append(item) + + # End of the overridable methods. + + def _wakeup_next(self, waiters): + # Wake up the next waiter (if any) that isn't cancelled. + while waiters: + waiter = waiters.popleft() + if not waiter.done(): + waiter.set_result(None) + break + + def __repr__(self): + return '<{} at {:#x} {}>'.format( + type(self).__name__, id(self), self._format()) + + def __str__(self): + return '<{} {}>'.format(type(self).__name__, self._format()) + + def _format(self): + result = 'maxsize={!r}'.format(self._maxsize) + if getattr(self, '_queue', None): + result += ' _queue={!r}'.format(list(self._queue)) + if self._getters: + result += ' _getters[{}]'.format(len(self._getters)) + if self._putters: + result += ' _putters[{}]'.format(len(self._putters)) + if self._unfinished_tasks: + result += ' tasks={}'.format(self._unfinished_tasks) + return result + + def qsize(self): + """Number of items in the queue.""" + return len(self._queue) + + @property + def maxsize(self): + """Number of items allowed in the queue.""" + return self._maxsize + + def empty(self): + """Return True if the queue is empty, False otherwise.""" + return not self._queue + + def full(self): + """Return True if there are maxsize items in the queue. + + Note: if the Queue was initialized with maxsize=0 (the default), + then full() is never True. + """ + if self._maxsize <= 0: + return False + else: + return self.qsize() >= self._maxsize + + @coroutine + def put(self, item): + """Put an item into the queue. + + Put an item into the queue. If the queue is full, wait until a free + slot is available before adding item. + + This method is a coroutine. + """ + while self.full(): + putter = self._loop.create_future() + self._putters.append(putter) + try: + yield from putter + except: + putter.cancel() # Just in case putter is not done yet. + if not self.full() and not putter.cancelled(): + # We were woken up by get_nowait(), but can't take + # the call. Wake up the next in line. 
+ self._wakeup_next(self._putters) + raise + return self.put_nowait(item) + + def put_nowait(self, item): + """Put an item into the queue without blocking. + + If no free slot is immediately available, raise QueueFull. + """ + if self.full(): + raise QueueFull + self._put(item) + self._unfinished_tasks += 1 + self._finished.clear() + self._wakeup_next(self._getters) + + @coroutine + def get(self): + """Remove and return an item from the queue. + + If queue is empty, wait until an item is available. + + This method is a coroutine. + """ + while self.empty(): + getter = self._loop.create_future() + self._getters.append(getter) + try: + yield from getter + except: + getter.cancel() # Just in case getter is not done yet. + if not self.empty() and not getter.cancelled(): + # We were woken up by put_nowait(), but can't take + # the call. Wake up the next in line. + self._wakeup_next(self._getters) + raise + return self.get_nowait() + + def get_nowait(self): + """Remove and return an item from the queue. + + Return an item if one is immediately available, else raise QueueEmpty. + """ + if self.empty(): + raise QueueEmpty + item = self._get() + self._wakeup_next(self._putters) + return item + + def task_done(self): + """Indicate that a formerly enqueued task is complete. + + Used by queue consumers. For each get() used to fetch a task, + a subsequent call to task_done() tells the queue that the processing + on the task is complete. + + If a join() is currently blocking, it will resume when all items have + been processed (meaning that a task_done() call was received for every + item that had been put() into the queue). + + Raises ValueError if called more times than there were items placed in + the queue. + """ + if self._unfinished_tasks <= 0: + raise ValueError('task_done() called too many times') + self._unfinished_tasks -= 1 + if self._unfinished_tasks == 0: + self._finished.set() + + @coroutine + def join(self): + """Block until all items in the queue have been gotten and processed. + + The count of unfinished tasks goes up whenever an item is added to the + queue. The count goes down whenever a consumer calls task_done() to + indicate that the item was retrieved and all work on it is complete. + When the count of unfinished tasks drops to zero, join() unblocks. + """ + if self._unfinished_tasks > 0: + yield from self._finished.wait() + + +class PriorityQueue(Queue): + """A subclass of Queue; retrieves entries in priority order (lowest first). + + Entries are typically tuples of the form: (priority number, data). + """ + + def _init(self, maxsize): + self._queue = [] + + def _put(self, item, heappush=heapq.heappush): + heappush(self._queue, item) + + def _get(self, heappop=heapq.heappop): + return heappop(self._queue) + + +class LifoQueue(Queue): + """A subclass of Queue that retrieves most recently added entries first.""" + + def _init(self, maxsize): + self._queue = [] + + def _put(self, item): + self._queue.append(item) + + def _get(self): + return self._queue.pop() + + +if not compat.PY35: + JoinableQueue = Queue + """Deprecated alias for Queue.""" + __all__.append('JoinableQueue') diff --git a/Lib/asyncio/runners.py b/Lib/asyncio/runners.py new file mode 100644 index 0000000000..c3a696ef57 --- /dev/null +++ b/Lib/asyncio/runners.py @@ -0,0 +1,72 @@ +__all__ = ['run'] + +from . import coroutines +from . import events +from . import tasks + + +def run(main, *, debug=False): + """Run a coroutine. 
+ + This function runs the passed coroutine, taking care of + managing the asyncio event loop and finalizing asynchronous + generators. + + This function cannot be called when another asyncio event loop is + running in the same thread. + + If debug is True, the event loop will be run in debug mode. + + This function always creates a new event loop and closes it at the end. + It should be used as a main entry point for asyncio programs, and should + ideally only be called once. + + Example: + + async def main(): + await asyncio.sleep(1) + print('hello') + + asyncio.run(main()) + """ + if events._get_running_loop() is not None: + raise RuntimeError( + "asyncio.run() cannot be called from a running event loop") + + if not coroutines.iscoroutine(main): + raise ValueError("a coroutine was expected, got {!r}".format(main)) + + loop = events.new_event_loop() + try: + events.set_event_loop(loop) + loop.set_debug(debug) + return loop.run_until_complete(main) + finally: + try: + _cancel_all_tasks(loop) + loop.run_until_complete(loop.shutdown_asyncgens()) + finally: + events.set_event_loop(None) + loop.close() + + +def _cancel_all_tasks(loop): + to_cancel = tasks.all_tasks(loop) + if not to_cancel: + return + + for task in to_cancel: + task.cancel() + + loop.run_until_complete( + tasks.gather(*to_cancel, loop=loop, return_exceptions=True)) + + for task in to_cancel: + if task.cancelled(): + continue + if task.exception() is not None: + loop.call_exception_handler({ + 'message': 'unhandled exception during asyncio.run() shutdown', + 'exception': task.exception(), + 'task': task, + }) diff --git a/Lib/asyncio/selector_events.py b/Lib/asyncio/selector_events.py new file mode 100644 index 0000000000..9dbe550b01 --- /dev/null +++ b/Lib/asyncio/selector_events.py @@ -0,0 +1,1142 @@ +"""Event loop using a selector and related classes. + +A selector is a "notify-when-ready" multiplexer. For a subclass which +also includes support for signal handling, see the unix_events sub-module. +""" + +__all__ = ['BaseSelectorEventLoop'] + +import collections +import errno +import functools +import socket +import warnings +import weakref +try: + import ssl +except ImportError: # pragma: no cover + ssl = None + +from . import base_events +from . import compat +from . import constants +from . import events +from . import futures +from . import selectors +from . import transports +from . import sslproto +from .coroutines import coroutine +from .log import logger + + +def _test_selector_event(selector, fd, event): + # Test if the selector is monitoring 'event' events + # for the file descriptor 'fd'. + try: + key = selector.get_key(fd) + except KeyError: + return False + else: + return bool(key.events & event) + + +if hasattr(socket, 'TCP_NODELAY'): + def _set_nodelay(sock): + if (sock.family in {socket.AF_INET, socket.AF_INET6} and + sock.type == socket.SOCK_STREAM and + sock.proto == socket.IPPROTO_TCP): + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) +else: + def _set_nodelay(sock): + pass + + +class BaseSelectorEventLoop(base_events.BaseEventLoop): + """Selector event loop. + + See events.EventLoop for API specification. 
+ """ + + def __init__(self, selector=None): + super().__init__() + + if selector is None: + selector = selectors.DefaultSelector() + logger.debug('Using selector: %s', selector.__class__.__name__) + self._selector = selector + self._make_self_pipe() + self._transports = weakref.WeakValueDictionary() + + def _make_socket_transport(self, sock, protocol, waiter=None, *, + extra=None, server=None): + return _SelectorSocketTransport(self, sock, protocol, waiter, + extra, server) + + def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None, + *, server_side=False, server_hostname=None, + extra=None, server=None): + if not sslproto._is_sslproto_available(): + return self._make_legacy_ssl_transport( + rawsock, protocol, sslcontext, waiter, + server_side=server_side, server_hostname=server_hostname, + extra=extra, server=server) + + ssl_protocol = sslproto.SSLProtocol(self, protocol, sslcontext, waiter, + server_side, server_hostname) + _SelectorSocketTransport(self, rawsock, ssl_protocol, + extra=extra, server=server) + return ssl_protocol._app_transport + + def _make_legacy_ssl_transport(self, rawsock, protocol, sslcontext, + waiter, *, + server_side=False, server_hostname=None, + extra=None, server=None): + # Use the legacy API: SSL_write, SSL_read, etc. The legacy API is used + # on Python 3.4 and older, when ssl.MemoryBIO is not available. + return _SelectorSslTransport( + self, rawsock, protocol, sslcontext, waiter, + server_side, server_hostname, extra, server) + + def _make_datagram_transport(self, sock, protocol, + address=None, waiter=None, extra=None): + return _SelectorDatagramTransport(self, sock, protocol, + address, waiter, extra) + + def close(self): + if self.is_running(): + raise RuntimeError("Cannot close a running event loop") + if self.is_closed(): + return + self._close_self_pipe() + super().close() + if self._selector is not None: + self._selector.close() + self._selector = None + + def _socketpair(self): + raise NotImplementedError + + def _close_self_pipe(self): + self._remove_reader(self._ssock.fileno()) + self._ssock.close() + self._ssock = None + self._csock.close() + self._csock = None + self._internal_fds -= 1 + + def _make_self_pipe(self): + # A self-socket, really. :-) + self._ssock, self._csock = self._socketpair() + self._ssock.setblocking(False) + self._csock.setblocking(False) + self._internal_fds += 1 + self._add_reader(self._ssock.fileno(), self._read_from_self) + + def _process_self_data(self, data): + pass + + def _read_from_self(self): + while True: + try: + data = self._ssock.recv(4096) + if not data: + break + self._process_self_data(data) + except InterruptedError: + continue + except BlockingIOError: + break + + def _write_to_self(self): + # This may be called from a different thread, possibly after + # _close_self_pipe() has been called or even while it is + # running. Guard for self._csock being None or closed. When + # a socket is closed, send() raises OSError (with errno set to + # EBADF, but let's not rely on the exact error code). 
csock = self._csock + if csock is not None: + try: + csock.send(b'\0') + except OSError: + if self._debug: + logger.debug("Failed to write a null byte into the " + "self-pipe socket", + exc_info=True) + + def _start_serving(self, protocol_factory, sock, + sslcontext=None, server=None, backlog=100): + self._add_reader(sock.fileno(), self._accept_connection, + protocol_factory, sock, sslcontext, server, backlog) + + def _accept_connection(self, protocol_factory, sock, + sslcontext=None, server=None, backlog=100): + # This method is only called once for each event loop tick where the + # listening socket has triggered an EVENT_READ. There may be multiple + # connections waiting for an .accept() so it is called in a loop. + # See https://bugs.python.org/issue27906 for more details. + for _ in range(backlog): + try: + conn, addr = sock.accept() + if self._debug: + logger.debug("%r got a new connection from %r: %r", + server, addr, conn) + conn.setblocking(False) + except (BlockingIOError, InterruptedError, ConnectionAbortedError): + # Early exit because the socket accept buffer is empty. + return None + except OSError as exc: + # There's nowhere to send the error, so just log it. + if exc.errno in (errno.EMFILE, errno.ENFILE, + errno.ENOBUFS, errno.ENOMEM): + # Some platforms (e.g. Linux) keep reporting the FD as + # ready, so we remove the read handler temporarily. + # We'll try again in a while. + self.call_exception_handler({ + 'message': 'socket.accept() out of system resource', + 'exception': exc, + 'socket': sock, + }) + self._remove_reader(sock.fileno()) + self.call_later(constants.ACCEPT_RETRY_DELAY, + self._start_serving, + protocol_factory, sock, sslcontext, server, + backlog) + else: + raise # The event loop will catch, log and ignore it. + else: + extra = {'peername': addr} + accept = self._accept_connection2(protocol_factory, conn, extra, + sslcontext, server) + self.create_task(accept) + + @coroutine + def _accept_connection2(self, protocol_factory, conn, extra, + sslcontext=None, server=None): + protocol = None + transport = None + try: + protocol = protocol_factory() + waiter = self.create_future() + if sslcontext: + transport = self._make_ssl_transport( + conn, protocol, sslcontext, waiter=waiter, + server_side=True, extra=extra, server=server) + else: + transport = self._make_socket_transport( + conn, protocol, waiter=waiter, extra=extra, + server=server) + + try: + yield from waiter + except: + transport.close() + raise + + # It's now up to the protocol to handle the connection.
+ except Exception as exc: + if self._debug: + context = { + 'message': ('Error on transport creation ' + 'for incoming connection'), + 'exception': exc, + } + if protocol is not None: + context['protocol'] = protocol + if transport is not None: + context['transport'] = transport + self.call_exception_handler(context) + + def _ensure_fd_no_transport(self, fd): + try: + transport = self._transports[fd] + except KeyError: + pass + else: + if not transport.is_closing(): + raise RuntimeError( + 'File descriptor {!r} is used by transport {!r}'.format( + fd, transport)) + + def _add_reader(self, fd, callback, *args): + self._check_closed() + handle = events.Handle(callback, args, self) + try: + key = self._selector.get_key(fd) + except KeyError: + self._selector.register(fd, selectors.EVENT_READ, + (handle, None)) + else: + mask, (reader, writer) = key.events, key.data + self._selector.modify(fd, mask | selectors.EVENT_READ, + (handle, writer)) + if reader is not None: + reader.cancel() + + def _remove_reader(self, fd): + if self.is_closed(): + return False + try: + key = self._selector.get_key(fd) + except KeyError: + return False + else: + mask, (reader, writer) = key.events, key.data + mask &= ~selectors.EVENT_READ + if not mask: + self._selector.unregister(fd) + else: + self._selector.modify(fd, mask, (None, writer)) + + if reader is not None: + reader.cancel() + return True + else: + return False + + def _add_writer(self, fd, callback, *args): + self._check_closed() + handle = events.Handle(callback, args, self) + try: + key = self._selector.get_key(fd) + except KeyError: + self._selector.register(fd, selectors.EVENT_WRITE, + (None, handle)) + else: + mask, (reader, writer) = key.events, key.data + self._selector.modify(fd, mask | selectors.EVENT_WRITE, + (reader, handle)) + if writer is not None: + writer.cancel() + + def _remove_writer(self, fd): + """Remove a writer callback.""" + if self.is_closed(): + return False + try: + key = self._selector.get_key(fd) + except KeyError: + return False + else: + mask, (reader, writer) = key.events, key.data + # Remove both writer and connector. + mask &= ~selectors.EVENT_WRITE + if not mask: + self._selector.unregister(fd) + else: + self._selector.modify(fd, mask, (reader, None)) + + if writer is not None: + writer.cancel() + return True + else: + return False + + def add_reader(self, fd, callback, *args): + """Add a reader callback.""" + self._ensure_fd_no_transport(fd) + return self._add_reader(fd, callback, *args) + + def remove_reader(self, fd): + """Remove a reader callback.""" + self._ensure_fd_no_transport(fd) + return self._remove_reader(fd) + + def add_writer(self, fd, callback, *args): + """Add a writer callback..""" + self._ensure_fd_no_transport(fd) + return self._add_writer(fd, callback, *args) + + def remove_writer(self, fd): + """Remove a writer callback.""" + self._ensure_fd_no_transport(fd) + return self._remove_writer(fd) + + def sock_recv(self, sock, n): + """Receive data from the socket. + + The return value is a bytes object representing the data received. + The maximum amount of data to be received at once is specified by + nbytes. + + This method is a coroutine. + """ + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + fut = self.create_future() + self._sock_recv(fut, False, sock, n) + return fut + + def _sock_recv(self, fut, registered, sock, n): + # _sock_recv() can add itself as an I/O callback if the operation can't + # be done immediately. 
Don't use it directly, call sock_recv(). + fd = sock.fileno() + if registered: + # Remove the callback early. It should be rare that the + # selector says the fd is ready but the call still returns + # EAGAIN, and I am willing to take a hit in that case in + # order to simplify the common case. + self.remove_reader(fd) + if fut.cancelled(): + return + try: + data = sock.recv(n) + except (BlockingIOError, InterruptedError): + self.add_reader(fd, self._sock_recv, fut, True, sock, n) + except Exception as exc: + fut.set_exception(exc) + else: + fut.set_result(data) + + def sock_sendall(self, sock, data): + """Send data to the socket. + + The socket must be connected to a remote socket. This method continues + to send data from data until either all data has been sent or an + error occurs. None is returned on success. On error, an exception is + raised, and there is no way to determine how much data, if any, was + successfully processed by the receiving end of the connection. + + This method is a coroutine. + """ + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + fut = self.create_future() + if data: + self._sock_sendall(fut, False, sock, data) + else: + fut.set_result(None) + return fut + + def _sock_sendall(self, fut, registered, sock, data): + fd = sock.fileno() + + if registered: + self.remove_writer(fd) + if fut.cancelled(): + return + + try: + n = sock.send(data) + except (BlockingIOError, InterruptedError): + n = 0 + except Exception as exc: + fut.set_exception(exc) + return + + if n == len(data): + fut.set_result(None) + else: + if n: + data = data[n:] + self.add_writer(fd, self._sock_sendall, fut, True, sock, data) + + @coroutine + def sock_connect(self, sock, address): + """Connect to a remote socket at address. + + This method is a coroutine. + """ + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + + if not hasattr(socket, 'AF_UNIX') or sock.family != socket.AF_UNIX: + resolved = base_events._ensure_resolved( + address, family=sock.family, proto=sock.proto, loop=self) + if not resolved.done(): + yield from resolved + _, _, _, _, address = resolved.result()[0] + + fut = self.create_future() + self._sock_connect(fut, sock, address) + return (yield from fut) + + def _sock_connect(self, fut, sock, address): + fd = sock.fileno() + try: + sock.connect(address) + except (BlockingIOError, InterruptedError): + # Issue #23618: When the C function connect() fails with EINTR, the + # connection runs in background. We have to wait until the socket + # becomes writable to be notified when the connection succeed or + # fails. + fut.add_done_callback( + functools.partial(self._sock_connect_done, fd)) + self.add_writer(fd, self._sock_connect_cb, fut, sock, address) + except Exception as exc: + fut.set_exception(exc) + else: + fut.set_result(None) + + def _sock_connect_done(self, fd, fut): + self.remove_writer(fd) + + def _sock_connect_cb(self, fut, sock, address): + if fut.cancelled(): + return + + try: + err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + if err != 0: + # Jump to any except clause below. + raise OSError(err, 'Connect call failed %s' % (address,)) + except (BlockingIOError, InterruptedError): + # socket is still registered, the callback will be retried later + pass + except Exception as exc: + fut.set_exception(exc) + else: + fut.set_result(None) + + def sock_accept(self, sock): + """Accept a connection. + + The socket must be bound to an address and listening for connections. 
+ The return value is a pair (conn, address) where conn is a new socket + object usable to send and receive data on the connection, and address + is the address bound to the socket on the other end of the connection. + + This method is a coroutine. + """ + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + fut = self.create_future() + self._sock_accept(fut, False, sock) + return fut + + def _sock_accept(self, fut, registered, sock): + fd = sock.fileno() + if registered: + self.remove_reader(fd) + if fut.cancelled(): + return + try: + conn, address = sock.accept() + conn.setblocking(False) + except (BlockingIOError, InterruptedError): + self.add_reader(fd, self._sock_accept, fut, True, sock) + except Exception as exc: + fut.set_exception(exc) + else: + fut.set_result((conn, address)) + + def _process_events(self, event_list): + for key, mask in event_list: + fileobj, (reader, writer) = key.fileobj, key.data + if mask & selectors.EVENT_READ and reader is not None: + if reader._cancelled: + self._remove_reader(fileobj) + else: + self._add_callback(reader) + if mask & selectors.EVENT_WRITE and writer is not None: + if writer._cancelled: + self._remove_writer(fileobj) + else: + self._add_callback(writer) + + def _stop_serving(self, sock): + self._remove_reader(sock.fileno()) + sock.close() + + +class _SelectorTransport(transports._FlowControlMixin, + transports.Transport): + + max_size = 256 * 1024 # Buffer size passed to recv(). + + _buffer_factory = bytearray # Constructs initial value for self._buffer. + + # Attribute used in the destructor: it must be set even if the constructor + # is not called (see _SelectorSslTransport which may start by raising an + # exception) + _sock = None + + def __init__(self, loop, sock, protocol, extra=None, server=None): + super().__init__(extra, loop) + self._extra['socket'] = sock + self._extra['sockname'] = sock.getsockname() + if 'peername' not in self._extra: + try: + self._extra['peername'] = sock.getpeername() + except socket.error: + self._extra['peername'] = None + self._sock = sock + self._sock_fd = sock.fileno() + self._protocol = protocol + self._protocol_connected = True + self._server = server + self._buffer = self._buffer_factory() + self._conn_lost = 0 # Set when call to connection_lost scheduled. + self._closing = False # Set when close() called. 
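+ # Writes attempted after the connection is lost bump _conn_lost; + # write() then drops the data silently and, once the count passes + # LOG_THRESHOLD_FOR_CONNLOST_WRITES, logs a warning (see write()).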
+ if self._server is not None: + self._server._attach() + loop._transports[self._sock_fd] = self + + def __repr__(self): + info = [self.__class__.__name__] + if self._sock is None: + info.append('closed') + elif self._closing: + info.append('closing') + info.append('fd=%s' % self._sock_fd) + # test if the transport was closed + if self._loop is not None and not self._loop.is_closed(): + polling = _test_selector_event(self._loop._selector, + self._sock_fd, selectors.EVENT_READ) + if polling: + info.append('read=polling') + else: + info.append('read=idle') + + polling = _test_selector_event(self._loop._selector, + self._sock_fd, + selectors.EVENT_WRITE) + if polling: + state = 'polling' + else: + state = 'idle' + + bufsize = self.get_write_buffer_size() + info.append('write=<%s, bufsize=%s>' % (state, bufsize)) + return '<%s>' % ' '.join(info) + + def abort(self): + self._force_close(None) + + def set_protocol(self, protocol): + self._protocol = protocol + + def get_protocol(self): + return self._protocol + + def is_closing(self): + return self._closing + + def close(self): + if self._closing: + return + self._closing = True + self._loop._remove_reader(self._sock_fd) + if not self._buffer: + self._conn_lost += 1 + self._loop._remove_writer(self._sock_fd) + self._loop.call_soon(self._call_connection_lost, None) + + # On Python 3.3 and older, objects with a destructor part of a reference + # cycle are never destroyed. It's not more the case on Python 3.4 thanks + # to the PEP 442. + if compat.PY34: + def __del__(self): + if self._sock is not None: + warnings.warn("unclosed transport %r" % self, ResourceWarning, + source=self) + self._sock.close() + + def _fatal_error(self, exc, message='Fatal error on transport'): + # Should be called from exception handler only. + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + self._force_close(exc) + + def _force_close(self, exc): + if self._conn_lost: + return + if self._buffer: + self._buffer.clear() + self._loop._remove_writer(self._sock_fd) + if not self._closing: + self._closing = True + self._loop._remove_reader(self._sock_fd) + self._conn_lost += 1 + self._loop.call_soon(self._call_connection_lost, exc) + + def _call_connection_lost(self, exc): + try: + if self._protocol_connected: + self._protocol.connection_lost(exc) + finally: + self._sock.close() + self._sock = None + self._protocol = None + self._loop = None + server = self._server + if server is not None: + server._detach() + self._server = None + + def get_write_buffer_size(self): + return len(self._buffer) + + +class _SelectorSocketTransport(_SelectorTransport): + + def __init__(self, loop, sock, protocol, waiter=None, + extra=None, server=None): + super().__init__(loop, sock, protocol, extra, server) + self._eof = False + self._paused = False + + # Disable the Nagle algorithm -- small writes will be + # sent without waiting for the TCP ACK. This generally + # decreases the latency (in some cases significantly.) 
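+ # _set_nodelay() (defined at module level above) only applies to + # AF_INET/AF_INET6 TCP stream sockets and is a no-op on platforms + # without TCP_NODELAY.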
+ _set_nodelay(self._sock) + + self._loop.call_soon(self._protocol.connection_made, self) + # only start reading when connection_made() has been called + self._loop.call_soon(self._loop._add_reader, + self._sock_fd, self._read_ready) + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) + + def pause_reading(self): + if self._closing: + raise RuntimeError('Cannot pause_reading() when closing') + if self._paused: + raise RuntimeError('Already paused') + self._paused = True + self._loop._remove_reader(self._sock_fd) + if self._loop.get_debug(): + logger.debug("%r pauses reading", self) + + def resume_reading(self): + if not self._paused: + raise RuntimeError('Not paused') + self._paused = False + if self._closing: + return + self._loop._add_reader(self._sock_fd, self._read_ready) + if self._loop.get_debug(): + logger.debug("%r resumes reading", self) + + def _read_ready(self): + if self._conn_lost: + return + try: + data = self._sock.recv(self.max_size) + except (BlockingIOError, InterruptedError): + pass + except Exception as exc: + self._fatal_error(exc, 'Fatal read error on socket transport') + else: + if data: + self._protocol.data_received(data) + else: + if self._loop.get_debug(): + logger.debug("%r received EOF", self) + keep_open = self._protocol.eof_received() + if keep_open: + # We're keeping the connection open so the + # protocol can write more, but we still can't + # receive more, so remove the reader callback. + self._loop._remove_reader(self._sock_fd) + else: + self.close() + + def write(self, data): + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError('data argument must be a bytes-like object, ' + 'not %r' % type(data).__name__) + if self._eof: + raise RuntimeError('Cannot call write() after write_eof()') + if not data: + return + + if self._conn_lost: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + if not self._buffer: + # Optimization: try to send now. + try: + n = self._sock.send(data) + except (BlockingIOError, InterruptedError): + pass + except Exception as exc: + self._fatal_error(exc, 'Fatal write error on socket transport') + return + else: + data = data[n:] + if not data: + return + # Not all was written; register write handler. + self._loop._add_writer(self._sock_fd, self._write_ready) + + # Add it to the buffer. + self._buffer.extend(data) + self._maybe_pause_protocol() + + def _write_ready(self): + assert self._buffer, 'Data should not be empty' + + if self._conn_lost: + return + try: + n = self._sock.send(self._buffer) + except (BlockingIOError, InterruptedError): + pass + except Exception as exc: + self._loop._remove_writer(self._sock_fd) + self._buffer.clear() + self._fatal_error(exc, 'Fatal write error on socket transport') + else: + if n: + del self._buffer[:n] + self._maybe_resume_protocol() # May append to buffer. 
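+ # If resume_writing() wrote more data synchronously, the buffer is + # non-empty again and the write handler stays registered; otherwise + # it is removed below.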
+ if not self._buffer: + self._loop._remove_writer(self._sock_fd) + if self._closing: + self._call_connection_lost(None) + elif self._eof: + self._sock.shutdown(socket.SHUT_WR) + + def write_eof(self): + if self._eof: + return + self._eof = True + if not self._buffer: + self._sock.shutdown(socket.SHUT_WR) + + def can_write_eof(self): + return True + + +class _SelectorSslTransport(_SelectorTransport): + + _buffer_factory = bytearray + + def __init__(self, loop, rawsock, protocol, sslcontext, waiter=None, + server_side=False, server_hostname=None, + extra=None, server=None): + if ssl is None: + raise RuntimeError('stdlib ssl module not available') + + if not sslcontext: + sslcontext = sslproto._create_transport_context(server_side, server_hostname) + + wrap_kwargs = { + 'server_side': server_side, + 'do_handshake_on_connect': False, + } + if server_hostname and not server_side: + wrap_kwargs['server_hostname'] = server_hostname + sslsock = sslcontext.wrap_socket(rawsock, **wrap_kwargs) + + super().__init__(loop, sslsock, protocol, extra, server) + # the protocol connection is only made after the SSL handshake + self._protocol_connected = False + + self._server_hostname = server_hostname + self._waiter = waiter + self._sslcontext = sslcontext + self._paused = False + + # SSL-specific extra info. (peercert is set later) + self._extra.update(sslcontext=sslcontext) + + if self._loop.get_debug(): + logger.debug("%r starts SSL handshake", self) + start_time = self._loop.time() + else: + start_time = None + self._on_handshake(start_time) + + def _wakeup_waiter(self, exc=None): + if self._waiter is None: + return + if not self._waiter.cancelled(): + if exc is not None: + self._waiter.set_exception(exc) + else: + self._waiter.set_result(None) + self._waiter = None + + def _on_handshake(self, start_time): + try: + self._sock.do_handshake() + except ssl.SSLWantReadError: + self._loop._add_reader(self._sock_fd, + self._on_handshake, start_time) + return + except ssl.SSLWantWriteError: + self._loop._add_writer(self._sock_fd, + self._on_handshake, start_time) + return + except BaseException as exc: + if self._loop.get_debug(): + logger.warning("%r: SSL handshake failed", + self, exc_info=True) + self._loop._remove_reader(self._sock_fd) + self._loop._remove_writer(self._sock_fd) + self._sock.close() + self._wakeup_waiter(exc) + if isinstance(exc, Exception): + return + else: + raise + + self._loop._remove_reader(self._sock_fd) + self._loop._remove_writer(self._sock_fd) + + peercert = self._sock.getpeercert() + if not hasattr(self._sslcontext, 'check_hostname'): + # Verify hostname if requested, Python 3.4+ uses check_hostname + # and checks the hostname in do_handshake() + if (self._server_hostname and + self._sslcontext.verify_mode != ssl.CERT_NONE): + try: + ssl.match_hostname(peercert, self._server_hostname) + except Exception as exc: + if self._loop.get_debug(): + logger.warning("%r: SSL handshake failed " + "on matching the hostname", + self, exc_info=True) + self._sock.close() + self._wakeup_waiter(exc) + return + + # Add extra info that becomes available after handshake. 
+ self._extra.update(peercert=peercert, + cipher=self._sock.cipher(), + compression=self._sock.compression(), + ssl_object=self._sock, + ) + + self._read_wants_write = False + self._write_wants_read = False + self._loop._add_reader(self._sock_fd, self._read_ready) + self._protocol_connected = True + self._loop.call_soon(self._protocol.connection_made, self) + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(self._wakeup_waiter) + + if self._loop.get_debug(): + dt = self._loop.time() - start_time + logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3) + + def pause_reading(self): + # XXX This is a bit icky, given the comment at the top of + # _read_ready(). Is it possible to evoke a deadlock? I don't + # know, although it doesn't look like it; write() will still + # accept more data for the buffer and eventually the app will + # call resume_reading() again, and things will flow again. + + if self._closing: + raise RuntimeError('Cannot pause_reading() when closing') + if self._paused: + raise RuntimeError('Already paused') + self._paused = True + self._loop._remove_reader(self._sock_fd) + if self._loop.get_debug(): + logger.debug("%r pauses reading", self) + + def resume_reading(self): + if not self._paused: + raise RuntimeError('Not paused') + self._paused = False + if self._closing: + return + self._loop._add_reader(self._sock_fd, self._read_ready) + if self._loop.get_debug(): + logger.debug("%r resumes reading", self) + + def _read_ready(self): + if self._conn_lost: + return + if self._write_wants_read: + self._write_wants_read = False + self._write_ready() + + if self._buffer: + self._loop._add_writer(self._sock_fd, self._write_ready) + + try: + data = self._sock.recv(self.max_size) + except (BlockingIOError, InterruptedError, ssl.SSLWantReadError): + pass + except ssl.SSLWantWriteError: + self._read_wants_write = True + self._loop._remove_reader(self._sock_fd) + self._loop._add_writer(self._sock_fd, self._write_ready) + except Exception as exc: + self._fatal_error(exc, 'Fatal read error on SSL transport') + else: + if data: + self._protocol.data_received(data) + else: + try: + if self._loop.get_debug(): + logger.debug("%r received EOF", self) + keep_open = self._protocol.eof_received() + if keep_open: + logger.warning('returning true from eof_received() ' + 'has no effect when using ssl') + finally: + self.close() + + def _write_ready(self): + if self._conn_lost: + return + if self._read_wants_write: + self._read_wants_write = False + self._read_ready() + + if not (self._paused or self._closing): + self._loop._add_reader(self._sock_fd, self._read_ready) + + if self._buffer: + try: + n = self._sock.send(self._buffer) + except (BlockingIOError, InterruptedError, ssl.SSLWantWriteError): + n = 0 + except ssl.SSLWantReadError: + n = 0 + self._loop._remove_writer(self._sock_fd) + self._write_wants_read = True + except Exception as exc: + self._loop._remove_writer(self._sock_fd) + self._buffer.clear() + self._fatal_error(exc, 'Fatal write error on SSL transport') + return + + if n: + del self._buffer[:n] + + self._maybe_resume_protocol() # May append to buffer. 
+ + if not self._buffer: + self._loop._remove_writer(self._sock_fd) + if self._closing: + self._call_connection_lost(None) + + def write(self, data): + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError('data argument must be a bytes-like object, ' + 'not %r' % type(data).__name__) + if not data: + return + + if self._conn_lost: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + if not self._buffer: + self._loop._add_writer(self._sock_fd, self._write_ready) + + # Add it to the buffer. + self._buffer.extend(data) + self._maybe_pause_protocol() + + def can_write_eof(self): + return False + + +class _SelectorDatagramTransport(_SelectorTransport): + + _buffer_factory = collections.deque + + def __init__(self, loop, sock, protocol, address=None, + waiter=None, extra=None): + super().__init__(loop, sock, protocol, extra) + self._address = address + self._loop.call_soon(self._protocol.connection_made, self) + # only start reading when connection_made() has been called + self._loop.call_soon(self._loop._add_reader, + self._sock_fd, self._read_ready) + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) + + def get_write_buffer_size(self): + return sum(len(data) for data, _ in self._buffer) + + def _read_ready(self): + if self._conn_lost: + return + try: + data, addr = self._sock.recvfrom(self.max_size) + except (BlockingIOError, InterruptedError): + pass + except OSError as exc: + self._protocol.error_received(exc) + except Exception as exc: + self._fatal_error(exc, 'Fatal read error on datagram transport') + else: + self._protocol.datagram_received(data, addr) + + def sendto(self, data, addr=None): + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError('data argument must be a bytes-like object, ' + 'not %r' % type(data).__name__) + if not data: + return + + if self._address and addr not in (None, self._address): + raise ValueError('Invalid address: must be None or %s' % + (self._address,)) + + if self._conn_lost and self._address: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + if not self._buffer: + # Attempt to send it right away first. + try: + if self._address: + self._sock.send(data) + else: + self._sock.sendto(data, addr) + return + except (BlockingIOError, InterruptedError): + self._loop._add_writer(self._sock_fd, self._sendto_ready) + except OSError as exc: + self._protocol.error_received(exc) + return + except Exception as exc: + self._fatal_error(exc, + 'Fatal write error on datagram transport') + return + + # Ensure that what we buffer is immutable. + self._buffer.append((bytes(data), addr)) + self._maybe_pause_protocol() + + def _sendto_ready(self): + while self._buffer: + data, addr = self._buffer.popleft() + try: + if self._address: + self._sock.send(data) + else: + self._sock.sendto(data, addr) + except (BlockingIOError, InterruptedError): + self._buffer.appendleft((data, addr)) # Try again later. + break + except OSError as exc: + self._protocol.error_received(exc) + return + except Exception as exc: + self._fatal_error(exc, + 'Fatal write error on datagram transport') + return + + self._maybe_resume_protocol() # May append to buffer. 
+ if not self._buffer: + self._loop._remove_writer(self._sock_fd) + if self._closing: + self._call_connection_lost(None) diff --git a/Lib/asyncio/sslproto.py b/Lib/asyncio/sslproto.py new file mode 100644 index 0000000000..7ad28d6aa0 --- /dev/null +++ b/Lib/asyncio/sslproto.py @@ -0,0 +1,692 @@ +import collections +import warnings +try: + import ssl +except ImportError: # pragma: no cover + ssl = None + +from . import base_events +from . import compat +from . import protocols +from . import transports +from .log import logger + + +def _create_transport_context(server_side, server_hostname): + if server_side: + raise ValueError('Server side SSL needs a valid SSLContext') + + # Client side may pass ssl=True to use a default + # context; in that case the sslcontext passed is None. + # The default is secure for client connections. + if hasattr(ssl, 'create_default_context'): + # Python 3.4+: use up-to-date strong settings. + sslcontext = ssl.create_default_context() + if not server_hostname: + sslcontext.check_hostname = False + else: + # Fallback for Python 3.3. + sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + sslcontext.options |= ssl.OP_NO_SSLv2 + sslcontext.options |= ssl.OP_NO_SSLv3 + sslcontext.set_default_verify_paths() + sslcontext.verify_mode = ssl.CERT_REQUIRED + return sslcontext + + +def _is_sslproto_available(): + return hasattr(ssl, "MemoryBIO") + + +# States of an _SSLPipe. +_UNWRAPPED = "UNWRAPPED" +_DO_HANDSHAKE = "DO_HANDSHAKE" +_WRAPPED = "WRAPPED" +_SHUTDOWN = "SHUTDOWN" + + +class _SSLPipe(object): + """An SSL "Pipe". + + An SSL pipe allows you to communicate with an SSL/TLS protocol instance + through memory buffers. It can be used to implement a security layer for an + existing connection where you don't have access to the connection's file + descriptor, or for some reason you don't want to use it. + + An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode, + data is passed through untransformed. In wrapped mode, application level + data is encrypted to SSL record level data and vice versa. The SSL record + level is the lowest level in the SSL protocol suite and is what travels + as-is over the wire. + + An SslPipe initially is in "unwrapped" mode. To start SSL, call + do_handshake(). To shutdown SSL again, call unwrap(). + """ + + max_size = 256 * 1024 # Buffer size passed to read() + + def __init__(self, context, server_side, server_hostname=None): + """ + The *context* argument specifies the ssl.SSLContext to use. + + The *server_side* argument indicates whether this is a server side or + client side transport. + + The optional *server_hostname* argument can be used to specify the + hostname you are connecting to. You may only specify this parameter if + the _ssl module supports Server Name Indication (SNI). + """ + self._context = context + self._server_side = server_side + self._server_hostname = server_hostname + self._state = _UNWRAPPED + self._incoming = ssl.MemoryBIO() + self._outgoing = ssl.MemoryBIO() + self._sslobj = None + self._need_ssldata = False + self._handshake_cb = None + self._shutdown_cb = None + + @property + def context(self): + """The SSL context passed to the constructor.""" + return self._context + + @property + def ssl_object(self): + """The internal ssl.SSLObject instance. + + Return None if the pipe is not wrapped. 
+ """ + return self._sslobj + + @property + def need_ssldata(self): + """Whether more record level data is needed to complete a handshake + that is currently in progress.""" + return self._need_ssldata + + @property + def wrapped(self): + """ + Whether a security layer is currently in effect. + + Return False during handshake. + """ + return self._state == _WRAPPED + + def do_handshake(self, callback=None): + """Start the SSL handshake. + + Return a list of ssldata. A ssldata element is a list of buffers + + The optional *callback* argument can be used to install a callback that + will be called when the handshake is complete. The callback will be + called with None if successful, else an exception instance. + """ + if self._state != _UNWRAPPED: + raise RuntimeError('handshake in progress or completed') + self._sslobj = self._context.wrap_bio( + self._incoming, self._outgoing, + server_side=self._server_side, + server_hostname=self._server_hostname) + self._state = _DO_HANDSHAKE + self._handshake_cb = callback + ssldata, appdata = self.feed_ssldata(b'', only_handshake=True) + assert len(appdata) == 0 + return ssldata + + def shutdown(self, callback=None): + """Start the SSL shutdown sequence. + + Return a list of ssldata. A ssldata element is a list of buffers + + The optional *callback* argument can be used to install a callback that + will be called when the shutdown is complete. The callback will be + called without arguments. + """ + if self._state == _UNWRAPPED: + raise RuntimeError('no security layer present') + if self._state == _SHUTDOWN: + raise RuntimeError('shutdown in progress') + assert self._state in (_WRAPPED, _DO_HANDSHAKE) + self._state = _SHUTDOWN + self._shutdown_cb = callback + ssldata, appdata = self.feed_ssldata(b'') + assert appdata == [] or appdata == [b''] + return ssldata + + def feed_eof(self): + """Send a potentially "ragged" EOF. + + This method will raise an SSL_ERROR_EOF exception if the EOF is + unexpected. + """ + self._incoming.write_eof() + ssldata, appdata = self.feed_ssldata(b'') + assert appdata == [] or appdata == [b''] + + def feed_ssldata(self, data, only_handshake=False): + """Feed SSL record level data into the pipe. + + The data must be a bytes instance. It is OK to send an empty bytes + instance. This can be used to get ssldata for a handshake initiated by + this endpoint. + + Return a (ssldata, appdata) tuple. The ssldata element is a list of + buffers containing SSL data that needs to be sent to the remote SSL. + + The appdata element is a list of buffers containing plaintext data that + needs to be forwarded to the application. The appdata list may contain + an empty buffer indicating an SSL "close_notify" alert. This alert must + be acknowledged by calling shutdown(). + """ + if self._state == _UNWRAPPED: + # If unwrapped, pass plaintext data straight through. + if data: + appdata = [data] + else: + appdata = [] + return ([], appdata) + + self._need_ssldata = False + if data: + self._incoming.write(data) + + ssldata = [] + appdata = [] + try: + if self._state == _DO_HANDSHAKE: + # Call do_handshake() until it doesn't raise anymore. 
+ self._sslobj.do_handshake() + self._state = _WRAPPED + if self._handshake_cb: + self._handshake_cb(None) + if only_handshake: + return (ssldata, appdata) + # Handshake done: execute the wrapped block + + if self._state == _WRAPPED: + # Main state: read data from SSL until close_notify + while True: + chunk = self._sslobj.read(self.max_size) + appdata.append(chunk) + if not chunk: # close_notify + break + + elif self._state == _SHUTDOWN: + # Call shutdown() until it doesn't raise anymore. + self._sslobj.unwrap() + self._sslobj = None + self._state = _UNWRAPPED + if self._shutdown_cb: + self._shutdown_cb() + + elif self._state == _UNWRAPPED: + # Drain possible plaintext data after close_notify. + appdata.append(self._incoming.read()) + except (ssl.SSLError, ssl.CertificateError) as exc: + if getattr(exc, 'errno', None) not in ( + ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE, + ssl.SSL_ERROR_SYSCALL): + if self._state == _DO_HANDSHAKE and self._handshake_cb: + self._handshake_cb(exc) + raise + self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ) + + # Check for record level data that needs to be sent back. + # Happens for the initial handshake and renegotiations. + if self._outgoing.pending: + ssldata.append(self._outgoing.read()) + return (ssldata, appdata) + + def feed_appdata(self, data, offset=0): + """Feed plaintext data into the pipe. + + Return an (ssldata, offset) tuple. The ssldata element is a list of + buffers containing record level data that needs to be sent to the + remote SSL instance. The offset is the number of plaintext bytes that + were processed, which may be less than the length of data. + + NOTE: In case of short writes, this call MUST be retried with the SAME + buffer passed into the *data* argument (i.e. the id() must be the + same). This is an OpenSSL requirement. A further particularity is that + a short write will always have offset == 0, because the _ssl module + does not enable partial writes. And even though the offset is zero, + there will still be encrypted data in ssldata. + """ + assert 0 <= offset <= len(data) + if self._state == _UNWRAPPED: + # pass through data in unwrapped mode + if offset < len(data): + ssldata = [data[offset:]] + else: + ssldata = [] + return (ssldata, len(data)) + + ssldata = [] + view = memoryview(data) + while True: + self._need_ssldata = False + try: + if offset < len(view): + offset += self._sslobj.write(view[offset:]) + except ssl.SSLError as exc: + # It is not allowed to call write() after unwrap() until the + # close_notify is acknowledged. We return the condition to the + # caller as a short write. + if exc.reason == 'PROTOCOL_IS_SHUTDOWN': + exc.errno = ssl.SSL_ERROR_WANT_READ + if exc.errno not in (ssl.SSL_ERROR_WANT_READ, + ssl.SSL_ERROR_WANT_WRITE, + ssl.SSL_ERROR_SYSCALL): + raise + self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ) + + # See if there's any record level data back for us. 
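+            # Even a short write (offset still 0) may have produced encrypted
+            # bytes that must reach the peer before more plaintext is taken.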
+            if self._outgoing.pending:
+                ssldata.append(self._outgoing.read())
+            if offset == len(view) or self._need_ssldata:
+                break
+        return (ssldata, offset)
+
+
+class _SSLProtocolTransport(transports._FlowControlMixin,
+                            transports.Transport):
+
+    def __init__(self, loop, ssl_protocol, app_protocol):
+        self._loop = loop
+        # SSLProtocol instance
+        self._ssl_protocol = ssl_protocol
+        self._app_protocol = app_protocol
+        self._closed = False
+
+    def get_extra_info(self, name, default=None):
+        """Get optional transport information."""
+        return self._ssl_protocol._get_extra_info(name, default)
+
+    def set_protocol(self, protocol):
+        self._app_protocol = protocol
+
+    def get_protocol(self):
+        return self._app_protocol
+
+    def is_closing(self):
+        return self._closed
+
+    def close(self):
+        """Close the transport.
+
+        Buffered data will be flushed asynchronously. No more data
+        will be received. After all buffered data is flushed, the
+        protocol's connection_lost() method will (eventually) be
+        called with None as its argument.
+        """
+        self._closed = True
+        self._ssl_protocol._start_shutdown()
+
+    # On Python 3.3 and older, objects with a destructor that are part of a
+    # reference cycle are never destroyed. That is no longer the case on
+    # Python 3.4, thanks to PEP 442.
+    if compat.PY34:
+        def __del__(self):
+            if not self._closed:
+                warnings.warn("unclosed transport %r" % self, ResourceWarning,
+                              source=self)
+                self.close()
+
+    def pause_reading(self):
+        """Pause the receiving end.
+
+        No data will be passed to the protocol's data_received()
+        method until resume_reading() is called.
+        """
+        self._ssl_protocol._transport.pause_reading()
+
+    def resume_reading(self):
+        """Resume the receiving end.
+
+        Data received will once again be passed to the protocol's
+        data_received() method.
+        """
+        self._ssl_protocol._transport.resume_reading()
+
+    def set_write_buffer_limits(self, high=None, low=None):
+        """Set the high- and low-water limits for write flow control.
+
+        These two values control when to call the protocol's
+        pause_writing() and resume_writing() methods. If specified,
+        the low-water limit must be less than or equal to the
+        high-water limit. Neither value can be negative.
+
+        The defaults are implementation-specific. If only the
+        high-water limit is given, the low-water limit defaults to an
+        implementation-specific value less than or equal to the
+        high-water limit. Setting high to zero forces low to zero as
+        well, and causes pause_writing() to be called whenever the
+        buffer becomes non-empty. Setting low to zero causes
+        resume_writing() to be called only once the buffer is empty.
+        Use of zero for either limit is generally sub-optimal as it
+        reduces opportunities for doing I/O and computation
+        concurrently.
+        """
+        self._ssl_protocol._transport.set_write_buffer_limits(high, low)
+
+    def get_write_buffer_size(self):
+        """Return the current size of the write buffer."""
+        return self._ssl_protocol._transport.get_write_buffer_size()
+
+    def write(self, data):
+        """Write some data bytes to the transport.
+
+        This does not block; it buffers the data and arranges for it
+        to be sent out asynchronously.
+        """
+        if not isinstance(data, (bytes, bytearray, memoryview)):
+            raise TypeError("data: expecting a bytes-like instance, got {!r}"
+                            .format(type(data).__name__))
+        if not data:
+            return
+        self._ssl_protocol._write_appdata(data)
+
+    def can_write_eof(self):
+        """Return True if this transport supports write_eof(), False if not."""
+        return False
+
+    def abort(self):
+        """Close the transport immediately.
+ + Buffered data will be lost. No more data will be received. + The protocol's connection_lost() method will (eventually) be + called with None as its argument. + """ + self._ssl_protocol._abort() + + +class SSLProtocol(protocols.Protocol): + """SSL protocol. + + Implementation of SSL on top of a socket using incoming and outgoing + buffers which are ssl.MemoryBIO objects. + """ + + def __init__(self, loop, app_protocol, sslcontext, waiter, + server_side=False, server_hostname=None, + call_connection_made=True): + if ssl is None: + raise RuntimeError('stdlib ssl module not available') + + if not sslcontext: + sslcontext = _create_transport_context(server_side, server_hostname) + + self._server_side = server_side + if server_hostname and not server_side: + self._server_hostname = server_hostname + else: + self._server_hostname = None + self._sslcontext = sslcontext + # SSL-specific extra info. More info are set when the handshake + # completes. + self._extra = dict(sslcontext=sslcontext) + + # App data write buffering + self._write_backlog = collections.deque() + self._write_buffer_size = 0 + + self._waiter = waiter + self._loop = loop + self._app_protocol = app_protocol + self._app_transport = _SSLProtocolTransport(self._loop, + self, self._app_protocol) + # _SSLPipe instance (None until the connection is made) + self._sslpipe = None + self._session_established = False + self._in_handshake = False + self._in_shutdown = False + # transport, ex: SelectorSocketTransport + self._transport = None + self._call_connection_made = call_connection_made + + def _wakeup_waiter(self, exc=None): + if self._waiter is None: + return + if not self._waiter.cancelled(): + if exc is not None: + self._waiter.set_exception(exc) + else: + self._waiter.set_result(None) + self._waiter = None + + def connection_made(self, transport): + """Called when the low-level connection is made. + + Start the SSL handshake. + """ + self._transport = transport + self._sslpipe = _SSLPipe(self._sslcontext, + self._server_side, + self._server_hostname) + self._start_handshake() + + def connection_lost(self, exc): + """Called when the low-level connection is lost or closed. + + The argument is an exception object or None (the latter + meaning a regular EOF is received or the connection was + aborted or closed). + """ + if self._session_established: + self._session_established = False + self._loop.call_soon(self._app_protocol.connection_lost, exc) + self._transport = None + self._app_transport = None + self._wakeup_waiter(exc) + + def pause_writing(self): + """Called when the low-level transport's buffer goes over + the high-water mark. + """ + self._app_protocol.pause_writing() + + def resume_writing(self): + """Called when the low-level transport's buffer drains below + the low-water mark. + """ + self._app_protocol.resume_writing() + + def data_received(self, data): + """Called when some SSL data is received. + + The argument is a bytes object. + """ + try: + ssldata, appdata = self._sslpipe.feed_ssldata(data) + except ssl.SSLError as e: + if self._loop.get_debug(): + logger.warning('%r: SSL error %s (reason %s)', + self, e.errno, e.reason) + self._abort() + return + + for chunk in ssldata: + self._transport.write(chunk) + + for chunk in appdata: + if chunk: + self._app_protocol.data_received(chunk) + else: + self._start_shutdown() + break + + def eof_received(self): + """Called when the other end of the low-level stream + is half-closed. + + If this returns a false value (including None), the transport + will close itself. 
If it returns a true value, closing the + transport is up to the protocol. + """ + try: + if self._loop.get_debug(): + logger.debug("%r received EOF", self) + + self._wakeup_waiter(ConnectionResetError) + + if not self._in_handshake: + keep_open = self._app_protocol.eof_received() + if keep_open: + logger.warning('returning true from eof_received() ' + 'has no effect when using ssl') + finally: + self._transport.close() + + def _get_extra_info(self, name, default=None): + if name in self._extra: + return self._extra[name] + else: + return self._transport.get_extra_info(name, default) + + def _start_shutdown(self): + if self._in_shutdown: + return + self._in_shutdown = True + self._write_appdata(b'') + + def _write_appdata(self, data): + self._write_backlog.append((data, 0)) + self._write_buffer_size += len(data) + self._process_write_backlog() + + def _start_handshake(self): + if self._loop.get_debug(): + logger.debug("%r starts SSL handshake", self) + self._handshake_start_time = self._loop.time() + else: + self._handshake_start_time = None + self._in_handshake = True + # (b'', 1) is a special value in _process_write_backlog() to do + # the SSL handshake + self._write_backlog.append((b'', 1)) + self._loop.call_soon(self._process_write_backlog) + + def _on_handshake_complete(self, handshake_exc): + self._in_handshake = False + + sslobj = self._sslpipe.ssl_object + try: + if handshake_exc is not None: + raise handshake_exc + + peercert = sslobj.getpeercert() + if not hasattr(self._sslcontext, 'check_hostname'): + # Verify hostname if requested, Python 3.4+ uses check_hostname + # and checks the hostname in do_handshake() + if (self._server_hostname + and self._sslcontext.verify_mode != ssl.CERT_NONE): + ssl.match_hostname(peercert, self._server_hostname) + except BaseException as exc: + if self._loop.get_debug(): + if isinstance(exc, ssl.CertificateError): + logger.warning("%r: SSL handshake failed " + "on verifying the certificate", + self, exc_info=True) + else: + logger.warning("%r: SSL handshake failed", + self, exc_info=True) + self._transport.close() + if isinstance(exc, Exception): + self._wakeup_waiter(exc) + return + else: + raise + + if self._loop.get_debug(): + dt = self._loop.time() - self._handshake_start_time + logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3) + + # Add extra info that becomes available after handshake. + self._extra.update(peercert=peercert, + cipher=sslobj.cipher(), + compression=sslobj.compression(), + ssl_object=sslobj, + ) + if self._call_connection_made: + self._app_protocol.connection_made(self._app_transport) + self._wakeup_waiter() + self._session_established = True + # In case transport.write() was already called. Don't call + # immediately _process_write_backlog(), but schedule it: + # _on_handshake_complete() can be called indirectly from + # _process_write_backlog(), and _process_write_backlog() is not + # reentrant. + self._loop.call_soon(self._process_write_backlog) + + def _process_write_backlog(self): + # Try to make progress on the write backlog. 
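+        # Each backlog entry is a (data, offset) pair: real application data
+        # carries its true offset, while the sentinels (b'', 1) and (b'', 0)
+        # request the handshake and the shutdown respectively (see
+        # _start_handshake() and _start_shutdown()).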
+ if self._transport is None: + return + + try: + for i in range(len(self._write_backlog)): + data, offset = self._write_backlog[0] + if data: + ssldata, offset = self._sslpipe.feed_appdata(data, offset) + elif offset: + ssldata = self._sslpipe.do_handshake( + self._on_handshake_complete) + offset = 1 + else: + ssldata = self._sslpipe.shutdown(self._finalize) + offset = 1 + + for chunk in ssldata: + self._transport.write(chunk) + + if offset < len(data): + self._write_backlog[0] = (data, offset) + # A short write means that a write is blocked on a read + # We need to enable reading if it is paused! + assert self._sslpipe.need_ssldata + if self._transport._paused: + self._transport.resume_reading() + break + + # An entire chunk from the backlog was processed. We can + # delete it and reduce the outstanding buffer size. + del self._write_backlog[0] + self._write_buffer_size -= len(data) + except BaseException as exc: + if self._in_handshake: + # BaseExceptions will be re-raised in _on_handshake_complete. + self._on_handshake_complete(exc) + else: + self._fatal_error(exc, 'Fatal error on SSL transport') + if not isinstance(exc, Exception): + # BaseException + raise + + def _fatal_error(self, exc, message='Fatal error on transport'): + # Should be called from exception handler only. + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self._transport, + 'protocol': self, + }) + if self._transport: + self._transport._force_close(exc) + + def _finalize(self): + if self._transport is not None: + self._transport.close() + + def _abort(self): + if self._transport is not None: + try: + self._transport.abort() + finally: + self._finalize() diff --git a/Lib/asyncio/streams.py b/Lib/asyncio/streams.py new file mode 100644 index 0000000000..a82cc79aca --- /dev/null +++ b/Lib/asyncio/streams.py @@ -0,0 +1,695 @@ +"""Stream-related things.""" + +__all__ = ['StreamReader', 'StreamWriter', 'StreamReaderProtocol', + 'open_connection', 'start_server', + 'IncompleteReadError', + 'LimitOverrunError', + ] + +import socket + +if hasattr(socket, 'AF_UNIX'): + __all__.extend(['open_unix_connection', 'start_unix_server']) + +from . import coroutines +from . import compat +from . import events +from . import protocols +from .coroutines import coroutine +from .log import logger + + +_DEFAULT_LIMIT = 2 ** 16 + + +class IncompleteReadError(EOFError): + """ + Incomplete read error. Attributes: + + - partial: read bytes string before the end of stream was reached + - expected: total number of expected bytes (or None if unknown) + """ + def __init__(self, partial, expected): + super().__init__("%d bytes read on a total of %r expected bytes" + % (len(partial), expected)) + self.partial = partial + self.expected = expected + + +class LimitOverrunError(Exception): + """Reached the buffer limit while looking for a separator. + + Attributes: + - consumed: total number of to be consumed bytes. + """ + def __init__(self, message, consumed): + super().__init__(message) + self.consumed = consumed + + +@coroutine +def open_connection(host=None, port=None, *, + loop=None, limit=_DEFAULT_LIMIT, **kwds): + """A wrapper for create_connection() returning a (reader, writer) pair. + + The reader returned is a StreamReader instance; the writer is a + StreamWriter instance. 
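+
+    A minimal example (the address shown is only illustrative; it assumes
+    a server is already listening there):
+
+        reader, writer = yield from open_connection('127.0.0.1', 8888)
+        writer.write(b'ping')
+        data = yield from reader.read(100)
+        writer.close()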
+
+    The arguments are all the usual arguments to create_connection()
+    except protocol_factory; most common are positional host and port,
+    with various optional keyword arguments following.
+
+    Additional optional keyword arguments are loop (to set the event loop
+    instance to use) and limit (to set the buffer limit passed to the
+    StreamReader).
+
+    (If you want to customize the StreamReader and/or
+    StreamReaderProtocol classes, just copy the code -- there's
+    really nothing special here except some convenience.)
+    """
+    if loop is None:
+        loop = events.get_event_loop()
+    reader = StreamReader(limit=limit, loop=loop)
+    protocol = StreamReaderProtocol(reader, loop=loop)
+    transport, _ = yield from loop.create_connection(
+        lambda: protocol, host, port, **kwds)
+    writer = StreamWriter(transport, protocol, reader, loop)
+    return reader, writer
+
+
+@coroutine
+def start_server(client_connected_cb, host=None, port=None, *,
+                 loop=None, limit=_DEFAULT_LIMIT, **kwds):
+    """Start a socket server, call back for each client connected.
+
+    The first parameter, `client_connected_cb`, takes two parameters:
+    client_reader, client_writer. client_reader is a StreamReader
+    object, while client_writer is a StreamWriter object. This
+    parameter can either be a plain callback function or a coroutine;
+    if it is a coroutine, it will be automatically converted into a
+    Task.
+
+    The rest of the arguments are all the usual arguments to
+    loop.create_server() except protocol_factory; most common are
+    positional host and port, with various optional keyword arguments
+    following. The return value is the same as loop.create_server().
+
+    Additional optional keyword arguments are loop (to set the event loop
+    instance to use) and limit (to set the buffer limit passed to the
+    StreamReader).
+
+    The return value is the same as loop.create_server(), i.e. a
+    Server object which can be used to stop the service.
+    """
+    if loop is None:
+        loop = events.get_event_loop()
+
+    def factory():
+        reader = StreamReader(limit=limit, loop=loop)
+        protocol = StreamReaderProtocol(reader, client_connected_cb,
+                                        loop=loop)
+        return protocol
+
+    return (yield from loop.create_server(factory, host, port, **kwds))
+
+
+if hasattr(socket, 'AF_UNIX'):
+    # UNIX Domain Sockets are supported on this platform
+
+    @coroutine
+    def open_unix_connection(path=None, *,
+                             loop=None, limit=_DEFAULT_LIMIT, **kwds):
+        """Similar to `open_connection` but works with UNIX Domain Sockets."""
+        if loop is None:
+            loop = events.get_event_loop()
+        reader = StreamReader(limit=limit, loop=loop)
+        protocol = StreamReaderProtocol(reader, loop=loop)
+        transport, _ = yield from loop.create_unix_connection(
+            lambda: protocol, path, **kwds)
+        writer = StreamWriter(transport, protocol, reader, loop)
+        return reader, writer
+
+    @coroutine
+    def start_unix_server(client_connected_cb, path=None, *,
+                          loop=None, limit=_DEFAULT_LIMIT, **kwds):
+        """Similar to `start_server` but works with UNIX Domain Sockets."""
+        if loop is None:
+            loop = events.get_event_loop()
+
+        def factory():
+            reader = StreamReader(limit=limit, loop=loop)
+            protocol = StreamReaderProtocol(reader, client_connected_cb,
+                                            loop=loop)
+            return protocol
+
+        return (yield from loop.create_unix_server(factory, path, **kwds))
+
+
+class FlowControlMixin(protocols.Protocol):
+    """Reusable flow control logic for StreamWriter.drain().
+
+    This implements the protocol methods pause_writing(),
+    resume_writing() and connection_lost(). If the subclass overrides
+    these it must call the super methods.
+ + StreamWriter.drain() must wait for _drain_helper() coroutine. + """ + + def __init__(self, loop=None): + if loop is None: + self._loop = events.get_event_loop() + else: + self._loop = loop + self._paused = False + self._drain_waiter = None + self._connection_lost = False + + def pause_writing(self): + assert not self._paused + self._paused = True + if self._loop.get_debug(): + logger.debug("%r pauses writing", self) + + def resume_writing(self): + assert self._paused + self._paused = False + if self._loop.get_debug(): + logger.debug("%r resumes writing", self) + + waiter = self._drain_waiter + if waiter is not None: + self._drain_waiter = None + if not waiter.done(): + waiter.set_result(None) + + def connection_lost(self, exc): + self._connection_lost = True + # Wake up the writer if currently paused. + if not self._paused: + return + waiter = self._drain_waiter + if waiter is None: + return + self._drain_waiter = None + if waiter.done(): + return + if exc is None: + waiter.set_result(None) + else: + waiter.set_exception(exc) + + @coroutine + def _drain_helper(self): + if self._connection_lost: + raise ConnectionResetError('Connection lost') + if not self._paused: + return + waiter = self._drain_waiter + assert waiter is None or waiter.cancelled() + waiter = self._loop.create_future() + self._drain_waiter = waiter + yield from waiter + + +class StreamReaderProtocol(FlowControlMixin, protocols.Protocol): + """Helper class to adapt between Protocol and StreamReader. + + (This is a helper class instead of making StreamReader itself a + Protocol subclass, because the StreamReader has other potential + uses, and to prevent the user of the StreamReader to accidentally + call inappropriate methods of the protocol.) + """ + + def __init__(self, stream_reader, client_connected_cb=None, loop=None): + super().__init__(loop=loop) + self._stream_reader = stream_reader + self._stream_writer = None + self._client_connected_cb = client_connected_cb + self._over_ssl = False + + def connection_made(self, transport): + self._stream_reader.set_transport(transport) + self._over_ssl = transport.get_extra_info('sslcontext') is not None + if self._client_connected_cb is not None: + self._stream_writer = StreamWriter(transport, self, + self._stream_reader, + self._loop) + res = self._client_connected_cb(self._stream_reader, + self._stream_writer) + if coroutines.iscoroutine(res): + self._loop.create_task(res) + + def connection_lost(self, exc): + if self._stream_reader is not None: + if exc is None: + self._stream_reader.feed_eof() + else: + self._stream_reader.set_exception(exc) + super().connection_lost(exc) + self._stream_reader = None + self._stream_writer = None + + def data_received(self, data): + self._stream_reader.feed_data(data) + + def eof_received(self): + self._stream_reader.feed_eof() + if self._over_ssl: + # Prevent a warning in SSLProtocol.eof_received: + # "returning true from eof_received() + # has no effect when using ssl" + return False + return True + + +class StreamWriter: + """Wraps a Transport. + + This exposes write(), writelines(), [can_]write_eof(), + get_extra_info() and close(). It adds drain() which returns an + optional Future on which you can wait for flow control. It also + adds a transport property which references the Transport + directly. 
+ """ + + def __init__(self, transport, protocol, reader, loop): + self._transport = transport + self._protocol = protocol + # drain() expects that the reader has an exception() method + assert reader is None or isinstance(reader, StreamReader) + self._reader = reader + self._loop = loop + + def __repr__(self): + info = [self.__class__.__name__, 'transport=%r' % self._transport] + if self._reader is not None: + info.append('reader=%r' % self._reader) + return '<%s>' % ' '.join(info) + + @property + def transport(self): + return self._transport + + def write(self, data): + self._transport.write(data) + + def writelines(self, data): + self._transport.writelines(data) + + def write_eof(self): + return self._transport.write_eof() + + def can_write_eof(self): + return self._transport.can_write_eof() + + def close(self): + return self._transport.close() + + def get_extra_info(self, name, default=None): + return self._transport.get_extra_info(name, default) + + @coroutine + def drain(self): + """Flush the write buffer. + + The intended use is to write + + w.write(data) + yield from w.drain() + """ + if self._reader is not None: + exc = self._reader.exception() + if exc is not None: + raise exc + if self._transport is not None: + if self._transport.is_closing(): + # Yield to the event loop so connection_lost() may be + # called. Without this, _drain_helper() would return + # immediately, and code that calls + # write(...); yield from drain() + # in a loop would never call connection_lost(), so it + # would not see an error when the socket is closed. + yield + yield from self._protocol._drain_helper() + + +class StreamReader: + + def __init__(self, limit=_DEFAULT_LIMIT, loop=None): + # The line length limit is a security feature; + # it also doubles as half the buffer limit. + + if limit <= 0: + raise ValueError('Limit cannot be <= 0') + + self._limit = limit + if loop is None: + self._loop = events.get_event_loop() + else: + self._loop = loop + self._buffer = bytearray() + self._eof = False # Whether we're done. 
+        self._waiter = None  # A future used by _wait_for_data()
+        self._exception = None
+        self._transport = None
+        self._paused = False
+
+    def __repr__(self):
+        info = ['StreamReader']
+        if self._buffer:
+            info.append('%d bytes' % len(self._buffer))
+        if self._eof:
+            info.append('eof')
+        if self._limit != _DEFAULT_LIMIT:
+            info.append('l=%d' % self._limit)
+        if self._waiter:
+            info.append('w=%r' % self._waiter)
+        if self._exception:
+            info.append('e=%r' % self._exception)
+        if self._transport:
+            info.append('t=%r' % self._transport)
+        if self._paused:
+            info.append('paused')
+        return '<%s>' % ' '.join(info)
+
+    def exception(self):
+        return self._exception
+
+    def set_exception(self, exc):
+        self._exception = exc
+
+        waiter = self._waiter
+        if waiter is not None:
+            self._waiter = None
+            if not waiter.cancelled():
+                waiter.set_exception(exc)
+
+    def _wakeup_waiter(self):
+        """Wakeup read*() functions waiting for data or EOF."""
+        waiter = self._waiter
+        if waiter is not None:
+            self._waiter = None
+            if not waiter.cancelled():
+                waiter.set_result(None)
+
+    def set_transport(self, transport):
+        assert self._transport is None, 'Transport already set'
+        self._transport = transport
+
+    def _maybe_resume_transport(self):
+        if self._paused and len(self._buffer) <= self._limit:
+            self._paused = False
+            self._transport.resume_reading()
+
+    def feed_eof(self):
+        self._eof = True
+        self._wakeup_waiter()
+
+    def at_eof(self):
+        """Return True if the buffer is empty and 'feed_eof' was called."""
+        return self._eof and not self._buffer
+
+    def feed_data(self, data):
+        assert not self._eof, 'feed_data after feed_eof'
+
+        if not data:
+            return
+
+        self._buffer.extend(data)
+        self._wakeup_waiter()
+
+        if (self._transport is not None and
+                not self._paused and
+                len(self._buffer) > 2 * self._limit):
+            try:
+                self._transport.pause_reading()
+            except NotImplementedError:
+                # The transport can't be paused.
+                # We'll just have to buffer all data.
+                # Forget the transport so we don't keep trying.
+                self._transport = None
+            else:
+                self._paused = True
+
+    @coroutine
+    def _wait_for_data(self, func_name):
+        """Wait until feed_data() or feed_eof() is called.
+
+        If stream was paused, automatically resume it.
+        """
+        # StreamReader uses a future to link the protocol feed_data() method
+        # to a read coroutine. Running two read coroutines at the same time
+        # would have an unexpected behaviour. It would not be possible to
+        # know which coroutine would get the next data.
+        if self._waiter is not None:
+            raise RuntimeError('%s() called while another coroutine is '
+                               'already waiting for incoming data' % func_name)
+
+        assert not self._eof, '_wait_for_data after EOF'
+
+        # Waiting for data while paused would deadlock, so prevent it.
+        # This is essential for readexactly(n) for the case when
+        # n > self._limit.
+        if self._paused:
+            self._paused = False
+            self._transport.resume_reading()
+
+        self._waiter = self._loop.create_future()
+        try:
+            yield from self._waiter
+        finally:
+            self._waiter = None
+
+    @coroutine
+    def readline(self):
+        """Read chunk of data from the stream until newline (b'\n') is found.
+
+        On success, return chunk that ends with newline. If only partial
+        line can be read due to EOF, return incomplete line without
+        terminating newline. When EOF was reached while no bytes read, empty
+        bytes object is returned.
+
+        If limit is reached, ValueError will be raised. In that case, if
+        newline was found, complete line including newline will be removed
+        from internal buffer. Else, internal buffer will be cleared. Limit is
+        compared against part of the line without newline.
+
+        If stream was paused, this function will automatically resume it if
+        needed.
+        """
+        sep = b'\n'
+        seplen = len(sep)
+        try:
+            line = yield from self.readuntil(sep)
+        except IncompleteReadError as e:
+            return e.partial
+        except LimitOverrunError as e:
+            if self._buffer.startswith(sep, e.consumed):
+                del self._buffer[:e.consumed + seplen]
+            else:
+                self._buffer.clear()
+            self._maybe_resume_transport()
+            raise ValueError(e.args[0])
+        return line
+
+    @coroutine
+    def readuntil(self, separator=b'\n'):
+        """Read data from the stream until ``separator`` is found.
+
+        On success, the data and separator will be removed from the
+        internal buffer (consumed). Returned data will include the
+        separator at the end.
+
+        Configured stream limit is used to check result. Limit sets the
+        maximal length of data that can be returned, not counting the
+        separator.
+
+        If an EOF occurs and the complete separator is still not found,
+        an IncompleteReadError exception will be raised, and the internal
+        buffer will be reset. The IncompleteReadError.partial attribute
+        may contain the separator partially.
+
+        If the data cannot be read because of over limit, a
+        LimitOverrunError exception will be raised, and the data
+        will be left in the internal buffer, so it can be read again.
+        """
+        seplen = len(separator)
+        if seplen == 0:
+            raise ValueError('Separator should be at least one-byte string')
+
+        if self._exception is not None:
+            raise self._exception
+
+        # Consume whole buffer except last bytes, which length is
+        # one less than seplen. Let's check corner cases with
+        # separator='SEPARATOR':
+        # * we have received almost complete separator (without last
+        #   byte). i.e buffer='some textSEPARATO'. In this case we
+        #   can safely consume len(separator) - 1 bytes.
+        # * last byte of buffer is first byte of separator, i.e.
+        #   buffer='abcdefghijklmnopqrS'. We may safely consume
+        #   everything except that last byte, but this requires
+        #   analyzing the bytes of the buffer that match a partial
+        #   separator. This is slow and/or requires an FSM. For this
+        #   case our implementation is not optimal, since it requires
+        #   rescanning data that is known not to belong to the
+        #   separator. In the real world, the separator will not be
+        #   long enough for this to cause noticeable performance
+        #   problems. Even when reading MIME-encoded messages :)
+
+        # `offset` is the number of bytes from the beginning of the buffer
+        # where there is no occurrence of `separator`.
+        offset = 0
+
+        # Loop until we find `separator` in the buffer, exceed the buffer size,
+        # or an EOF has happened.
+        while True:
+            buflen = len(self._buffer)
+
+            # Check if we now have enough data in the buffer for `separator` to
+            # fit.
+            if buflen - offset >= seplen:
+                isep = self._buffer.find(separator, offset)
+
+                if isep != -1:
+                    # `separator` is in the buffer. `isep` will be used later
+                    # to retrieve the data.
+                    break
+
+                # see upper comment for explanation.
+                offset = buflen + 1 - seplen
+                if offset > self._limit:
+                    raise LimitOverrunError(
+                        'Separator is not found, and chunk exceed the limit',
+                        offset)
+
+            # Complete message (with full separator) may be present in buffer
+            # even when EOF flag is set. This may happen when the last chunk
+            # adds data which makes separator be found. That's why we check for
+            # EOF *after* inspecting the buffer.
+            if self._eof:
+                chunk = bytes(self._buffer)
+                self._buffer.clear()
+                raise IncompleteReadError(chunk, None)
+
+            # _wait_for_data() will resume reading if stream was paused.
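+            # When new data (or EOF) arrives, loop around and rescan the
+            # buffer, starting from `offset`.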
+ yield from self._wait_for_data('readuntil') + + if isep > self._limit: + raise LimitOverrunError( + 'Separator is found, but chunk is longer than limit', isep) + + chunk = self._buffer[:isep + seplen] + del self._buffer[:isep + seplen] + self._maybe_resume_transport() + return bytes(chunk) + + @coroutine + def read(self, n=-1): + """Read up to `n` bytes from the stream. + + If n is not provided, or set to -1, read until EOF and return all read + bytes. If the EOF was received and the internal buffer is empty, return + an empty bytes object. + + If n is zero, return empty bytes object immediately. + + If n is positive, this function try to read `n` bytes, and may return + less or equal bytes than requested, but at least one byte. If EOF was + received before any byte is read, this function returns empty byte + object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. + """ + + if self._exception is not None: + raise self._exception + + if n == 0: + return b'' + + if n < 0: + # This used to just loop creating a new waiter hoping to + # collect everything in self._buffer, but that would + # deadlock if the subprocess sends more than self.limit + # bytes. So just call self.read(self._limit) until EOF. + blocks = [] + while True: + block = yield from self.read(self._limit) + if not block: + break + blocks.append(block) + return b''.join(blocks) + + if not self._buffer and not self._eof: + yield from self._wait_for_data('read') + + # This will work right even if buffer is less than n bytes + data = bytes(self._buffer[:n]) + del self._buffer[:n] + + self._maybe_resume_transport() + return data + + @coroutine + def readexactly(self, n): + """Read exactly `n` bytes. + + Raise an IncompleteReadError if EOF is reached before `n` bytes can be + read. The IncompleteReadError.partial attribute of the exception will + contain the partial read bytes. + + if n is zero, return empty bytes object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. + """ + if n < 0: + raise ValueError('readexactly size can not be less than zero') + + if self._exception is not None: + raise self._exception + + if n == 0: + return b'' + + while len(self._buffer) < n: + if self._eof: + incomplete = bytes(self._buffer) + self._buffer.clear() + raise IncompleteReadError(incomplete, n) + + yield from self._wait_for_data('readexactly') + + if len(self._buffer) == n: + data = bytes(self._buffer) + self._buffer.clear() + else: + data = bytes(self._buffer[:n]) + del self._buffer[:n] + self._maybe_resume_transport() + return data + + if compat.PY35: + @coroutine + def __aiter__(self): + return self + + @coroutine + def __anext__(self): + val = yield from self.readline() + if val == b'': + raise StopAsyncIteration + return val + + if compat.PY352: + # In Python 3.5.2 and greater, __aiter__ should return + # the asynchronous iterator directly. + def __aiter__(self): + return self diff --git a/Lib/asyncio/subprocess.py b/Lib/asyncio/subprocess.py new file mode 100644 index 0000000000..b2f5304f77 --- /dev/null +++ b/Lib/asyncio/subprocess.py @@ -0,0 +1,213 @@ +__all__ = ['create_subprocess_exec', 'create_subprocess_shell'] + +import subprocess + +from . import events +from . import protocols +from . import streams +from . 
import tasks
+from .coroutines import coroutine
+from .log import logger
+
+
+PIPE = subprocess.PIPE
+STDOUT = subprocess.STDOUT
+DEVNULL = subprocess.DEVNULL
+
+
+class SubprocessStreamProtocol(streams.FlowControlMixin,
+                               protocols.SubprocessProtocol):
+    """Like StreamReaderProtocol, but for a subprocess."""
+
+    def __init__(self, limit, loop):
+        super().__init__(loop=loop)
+        self._limit = limit
+        self.stdin = self.stdout = self.stderr = None
+        self._transport = None
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self.stdin is not None:
+            info.append('stdin=%r' % self.stdin)
+        if self.stdout is not None:
+            info.append('stdout=%r' % self.stdout)
+        if self.stderr is not None:
+            info.append('stderr=%r' % self.stderr)
+        return '<%s>' % ' '.join(info)
+
+    def connection_made(self, transport):
+        self._transport = transport
+
+        stdout_transport = transport.get_pipe_transport(1)
+        if stdout_transport is not None:
+            self.stdout = streams.StreamReader(limit=self._limit,
+                                               loop=self._loop)
+            self.stdout.set_transport(stdout_transport)
+
+        stderr_transport = transport.get_pipe_transport(2)
+        if stderr_transport is not None:
+            self.stderr = streams.StreamReader(limit=self._limit,
+                                               loop=self._loop)
+            self.stderr.set_transport(stderr_transport)
+
+        stdin_transport = transport.get_pipe_transport(0)
+        if stdin_transport is not None:
+            self.stdin = streams.StreamWriter(stdin_transport,
+                                              protocol=self,
+                                              reader=None,
+                                              loop=self._loop)
+
+    def pipe_data_received(self, fd, data):
+        if fd == 1:
+            reader = self.stdout
+        elif fd == 2:
+            reader = self.stderr
+        else:
+            reader = None
+        if reader is not None:
+            reader.feed_data(data)
+
+    def pipe_connection_lost(self, fd, exc):
+        if fd == 0:
+            pipe = self.stdin
+            if pipe is not None:
+                pipe.close()
+            self.connection_lost(exc)
+            return
+        if fd == 1:
+            reader = self.stdout
+        elif fd == 2:
+            reader = self.stderr
+        else:
+            reader = None
+        if reader is not None:
+            if exc is None:
+                reader.feed_eof()
+            else:
+                reader.set_exception(exc)
+
+    def process_exited(self):
+        self._transport.close()
+        self._transport = None
+
+
+class Process:
+    def __init__(self, transport, protocol, loop):
+        self._transport = transport
+        self._protocol = protocol
+        self._loop = loop
+        self.stdin = protocol.stdin
+        self.stdout = protocol.stdout
+        self.stderr = protocol.stderr
+        self.pid = transport.get_pid()
+
+    def __repr__(self):
+        return '<%s %s>' % (self.__class__.__name__, self.pid)
+
+    @property
+    def returncode(self):
+        return self._transport.get_returncode()
+
+    @coroutine
+    def wait(self):
+        """Wait until the process exits and return the process return code.
+ + This method is a coroutine.""" + return (yield from self._transport._wait()) + + def send_signal(self, signal): + self._transport.send_signal(signal) + + def terminate(self): + self._transport.terminate() + + def kill(self): + self._transport.kill() + + @coroutine + def _feed_stdin(self, input): + debug = self._loop.get_debug() + self.stdin.write(input) + if debug: + logger.debug('%r communicate: feed stdin (%s bytes)', + self, len(input)) + try: + yield from self.stdin.drain() + except (BrokenPipeError, ConnectionResetError) as exc: + # communicate() ignores BrokenPipeError and ConnectionResetError + if debug: + logger.debug('%r communicate: stdin got %r', self, exc) + + if debug: + logger.debug('%r communicate: close stdin', self) + self.stdin.close() + + @coroutine + def _noop(self): + return None + + @coroutine + def _read_stream(self, fd): + transport = self._transport.get_pipe_transport(fd) + if fd == 2: + stream = self.stderr + else: + assert fd == 1 + stream = self.stdout + if self._loop.get_debug(): + name = 'stdout' if fd == 1 else 'stderr' + logger.debug('%r communicate: read %s', self, name) + output = yield from stream.read() + if self._loop.get_debug(): + name = 'stdout' if fd == 1 else 'stderr' + logger.debug('%r communicate: close %s', self, name) + transport.close() + return output + + @coroutine + def communicate(self, input=None): + if input is not None: + stdin = self._feed_stdin(input) + else: + stdin = self._noop() + if self.stdout is not None: + stdout = self._read_stream(1) + else: + stdout = self._noop() + if self.stderr is not None: + stderr = self._read_stream(2) + else: + stderr = self._noop() + stdin, stdout, stderr = yield from tasks.gather(stdin, stdout, stderr, + loop=self._loop) + yield from self.wait() + return (stdout, stderr) + + +@coroutine +def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None, + loop=None, limit=streams._DEFAULT_LIMIT, **kwds): + if loop is None: + loop = events.get_event_loop() + protocol_factory = lambda: SubprocessStreamProtocol(limit=limit, + loop=loop) + transport, protocol = yield from loop.subprocess_shell( + protocol_factory, + cmd, stdin=stdin, stdout=stdout, + stderr=stderr, **kwds) + return Process(transport, protocol, loop) + +@coroutine +def create_subprocess_exec(program, *args, stdin=None, stdout=None, + stderr=None, loop=None, + limit=streams._DEFAULT_LIMIT, **kwds): + if loop is None: + loop = events.get_event_loop() + protocol_factory = lambda: SubprocessStreamProtocol(limit=limit, + loop=loop) + transport, protocol = yield from loop.subprocess_exec( + protocol_factory, + program, *args, + stdin=stdin, stdout=stdout, + stderr=stderr, **kwds) + return Process(transport, protocol, loop) diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py new file mode 100644 index 0000000000..d31c0109f4 --- /dev/null +++ b/Lib/asyncio/tasks.py @@ -0,0 +1,786 @@ +"""Support for tasks, coroutines and the scheduler.""" + +__all__ = ['Task', + 'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED', + 'wait', 'wait_for', 'as_completed', 'sleep', 'async', + 'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe', + ] + +import concurrent.futures +import functools +import inspect +import warnings +import weakref + +from . import base_tasks +from . import compat +from . import coroutines +from . import events +from . 
import futures +from .coroutines import coroutine + + +def current_task(loop=None): + """Return a currently executed task.""" + if loop is None: + loop = events.get_running_loop() + return _current_tasks.get(loop) + + +def all_tasks(loop=None): + """Return a set of all tasks for the loop.""" + if loop is None: + loop = events.get_running_loop() + return {t for t in _all_tasks + if futures._get_loop(t) is loop and not t.done()} + + +def _all_tasks_compat(loop=None): + # Different from "all_task()" by returning *all* Tasks, including + # the completed ones. Used to implement deprecated "Tasks.all_task()" + # method. + if loop is None: + loop = events.get_event_loop() + return {t for t in _all_tasks if futures._get_loop(t) is loop} + + +class Task(futures.Future): + """A coroutine wrapped in a Future.""" + + # An important invariant maintained while a Task not done: + # + # - Either _fut_waiter is None, and _step() is scheduled; + # - or _fut_waiter is some Future, and _step() is *not* scheduled. + # + # The only transition from the latter to the former is through + # _wakeup(). When _fut_waiter is not None, one of its callbacks + # must be _wakeup(). + + # Weak set containing all tasks alive. + _all_tasks = weakref.WeakSet() + + # Dictionary containing tasks that are currently active in + # all running event loops. {EventLoop: Task} + _current_tasks = {} + + # If False, don't log a message if the task is destroyed whereas its + # status is still pending + _log_destroy_pending = True + + @classmethod + def current_task(cls, loop=None): + """Return the currently running task in an event loop or None. + + By default the current task for the current event loop is returned. + + None is returned when called not in the context of a Task. + """ + if loop is None: + loop = events.get_event_loop() + return cls._current_tasks.get(loop) + + @classmethod + def all_tasks(cls, loop=None): + """Return a set of all tasks for an event loop. + + By default all tasks for the current event loop are returned. + """ + if loop is None: + loop = events.get_event_loop() + return {t for t in cls._all_tasks if t._loop is loop} + + def __init__(self, coro, *, loop=None): + assert coroutines.iscoroutine(coro), repr(coro) + super().__init__(loop=loop) + if self._source_traceback: + del self._source_traceback[-1] + self._coro = coro + self._fut_waiter = None + self._must_cancel = False + self._loop.call_soon(self._step) + self.__class__._all_tasks.add(self) + + # On Python 3.3 or older, objects with a destructor that are part of a + # reference cycle are never destroyed. That's not the case any more on + # Python 3.4 thanks to the PEP 442. + if compat.PY34: + def __del__(self): + if self._state == futures._PENDING and self._log_destroy_pending: + context = { + 'task': self, + 'message': 'Task was destroyed but it is pending!', + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + futures.Future.__del__(self) + + def _repr_info(self): + return base_tasks._task_repr_info(self) + + def get_stack(self, *, limit=None): + """Return the list of stack frames for this task's coroutine. + + If the coroutine is not done, this returns the stack where it is + suspended. If the coroutine has completed successfully or was + cancelled, this returns an empty list. If the coroutine was + terminated by an exception, this returns the list of traceback + frames. + + The frames are always ordered from oldest to newest. 
+ + The optional limit gives the maximum number of frames to + return; by default all available frames are returned. Its + meaning differs depending on whether a stack or a traceback is + returned: the newest frames of a stack are returned, but the + oldest frames of a traceback are returned. (This matches the + behavior of the traceback module.) + + For reasons beyond our control, only one stack frame is + returned for a suspended coroutine. + """ + return base_tasks._task_get_stack(self, limit) + + def print_stack(self, *, limit=None, file=None): + """Print the stack or traceback for this task's coroutine. + + This produces output similar to that of the traceback module, + for the frames retrieved by get_stack(). The limit argument + is passed to get_stack(). The file argument is an I/O stream + to which the output is written; by default output is written + to sys.stderr. + """ + return base_tasks._task_print_stack(self, limit, file) + + def cancel(self): + """Request that this task cancel itself. + + This arranges for a CancelledError to be thrown into the + wrapped coroutine on the next cycle through the event loop. + The coroutine then has a chance to clean up or even deny + the request using try/except/finally. + + Unlike Future.cancel, this does not guarantee that the + task will be cancelled: the exception might be caught and + acted upon, delaying cancellation of the task or preventing + cancellation completely. The task may also return a value or + raise a different exception. + + Immediately after this method is called, Task.cancelled() will + not return True (unless the task was already cancelled). A + task will be marked as cancelled when the wrapped coroutine + terminates with a CancelledError exception (even if cancel() + was not called). + """ + if self.done(): + return False + if self._fut_waiter is not None: + if self._fut_waiter.cancel(): + # Leave self._fut_waiter; it may be a Task that + # catches and ignores the cancellation so we may have + # to cancel it again later. + return True + # It must be the case that self._step is already scheduled. + self._must_cancel = True + return True + + def _step(self, exc=None): + assert not self.done(), \ + '_step(): already done: {!r}, {!r}'.format(self, exc) + if self._must_cancel: + if not isinstance(exc, futures.CancelledError): + exc = futures.CancelledError() + self._must_cancel = False + coro = self._coro + self._fut_waiter = None + + self.__class__._current_tasks[self._loop] = self + # Call either coro.throw(exc) or coro.send(None). + try: + if exc is None: + # We use the `send` method directly, because coroutines + # don't have `__iter__` and `__next__` methods. + result = coro.send(None) + else: + result = coro.throw(exc) + except StopIteration as exc: + self.set_result(exc.value) + except futures.CancelledError: + super().cancel() # I.e., Future.cancel(self). + except Exception as exc: + self.set_exception(exc) + except BaseException as exc: + self.set_exception(exc) + raise + else: + blocking = getattr(result, '_asyncio_future_blocking', None) + if blocking is not None: + # Yielded Future must come from Future.__iter__(). 
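+                # (A future whose _asyncio_future_blocking flag is still
+                # False was yielded directly, i.e. with a plain "yield"
+                # instead of "yield from", and is rejected further below.)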
+ if result._loop is not self._loop: + self._loop.call_soon( + self._step, + RuntimeError( + 'Task {!r} got Future {!r} attached to a ' + 'different loop'.format(self, result))) + elif blocking: + if result is self: + self._loop.call_soon( + self._step, + RuntimeError( + 'Task cannot await on itself: {!r}'.format( + self))) + else: + result._asyncio_future_blocking = False + result.add_done_callback(self._wakeup) + self._fut_waiter = result + if self._must_cancel: + if self._fut_waiter.cancel(): + self._must_cancel = False + else: + self._loop.call_soon( + self._step, + RuntimeError( + 'yield was used instead of yield from ' + 'in task {!r} with {!r}'.format(self, result))) + elif result is None: + # Bare yield relinquishes control for one event loop iteration. + self._loop.call_soon(self._step) + elif inspect.isgenerator(result): + # Yielding a generator is just wrong. + self._loop.call_soon( + self._step, + RuntimeError( + 'yield was used instead of yield from for ' + 'generator in task {!r} with {}'.format( + self, result))) + else: + # Yielding something else is an error. + self._loop.call_soon( + self._step, + RuntimeError( + 'Task got bad yield: {!r}'.format(result))) + finally: + self.__class__._current_tasks.pop(self._loop) + self = None # Needed to break cycles when an exception occurs. + + def _wakeup(self, future): + try: + future.result() + except Exception as exc: + # This may also be a cancellation. + self._step(exc) + else: + # Don't pass the value of `future.result()` explicitly, + # as `Future.__iter__` and `Future.__await__` don't need it. + # If we call `_step(value, None)` instead of `_step()`, + # Python eval loop would use `.send(value)` method call, + # instead of `__next__()`, which is slower for futures + # that return non-generator iterators from their `__iter__`. + self._step() + self = None # Needed to break cycles when an exception occurs. + + +_PyTask = Task + + +try: + import _asyncio +except ImportError: + pass +else: + # _CTask is needed for tests. + Task = _CTask = _asyncio.Task + + +# wait() and as_completed() similar to those in PEP 3148. + +FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED +FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION +ALL_COMPLETED = concurrent.futures.ALL_COMPLETED + + +@coroutine +def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED): + """Wait for the Futures and coroutines given by fs to complete. + + The sequence futures must not be empty. + + Coroutines will be wrapped in Tasks. + + Returns two sets of Future: (done, pending). + + Usage: + + done, pending = yield from asyncio.wait(fs) + + Note: This does not raise TimeoutError! Futures that aren't done + when the timeout occurs are returned in the second set. + """ + if futures.isfuture(fs) or coroutines.iscoroutine(fs): + raise TypeError("expect a list of futures, not %s" % type(fs).__name__) + if not fs: + raise ValueError('Set of coroutines/Futures is empty.') + if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED): + raise ValueError('Invalid return_when value: {}'.format(return_when)) + + if loop is None: + loop = events.get_event_loop() + + fs = {ensure_future(f, loop=loop) for f in set(fs)} + + return (yield from _wait(fs, timeout, return_when, loop)) + + +def _release_waiter(waiter, *args): + if not waiter.done(): + waiter.set_result(None) + + +@coroutine +def wait_for(fut, timeout, *, loop=None): + """Wait for the single Future or coroutine to complete, with timeout. + + Coroutine will be wrapped in Task. 
+ + Returns result of the Future or coroutine. When a timeout occurs, + it cancels the task and raises TimeoutError. To avoid the task + cancellation, wrap it in shield(). + + If the wait is cancelled, the task is also cancelled. + + This function is a coroutine. + """ + if loop is None: + loop = events.get_event_loop() + + if timeout is None: + return (yield from fut) + + waiter = loop.create_future() + timeout_handle = loop.call_later(timeout, _release_waiter, waiter) + cb = functools.partial(_release_waiter, waiter) + + fut = ensure_future(fut, loop=loop) + fut.add_done_callback(cb) + + try: + # wait until the future completes or the timeout + try: + yield from waiter + except futures.CancelledError: + fut.remove_done_callback(cb) + fut.cancel() + raise + + if fut.done(): + return fut.result() + else: + fut.remove_done_callback(cb) + fut.cancel() + raise futures.TimeoutError() + finally: + timeout_handle.cancel() + + +@coroutine +def _wait(fs, timeout, return_when, loop): + """Internal helper for wait() and wait_for(). + + The fs argument must be a collection of Futures. + """ + assert fs, 'Set of Futures is empty.' + waiter = loop.create_future() + timeout_handle = None + if timeout is not None: + timeout_handle = loop.call_later(timeout, _release_waiter, waiter) + counter = len(fs) + + def _on_completion(f): + nonlocal counter + counter -= 1 + if (counter <= 0 or + return_when == FIRST_COMPLETED or + return_when == FIRST_EXCEPTION and (not f.cancelled() and + f.exception() is not None)): + if timeout_handle is not None: + timeout_handle.cancel() + if not waiter.done(): + waiter.set_result(None) + + for f in fs: + f.add_done_callback(_on_completion) + + try: + yield from waiter + finally: + if timeout_handle is not None: + timeout_handle.cancel() + + done, pending = set(), set() + for f in fs: + f.remove_done_callback(_on_completion) + if f.done(): + done.add(f) + else: + pending.add(f) + return done, pending + + +# This is *not* a @coroutine! It is just an iterator (yielding Futures). +def as_completed(fs, *, loop=None, timeout=None): + """Return an iterator whose values are coroutines. + + When waiting for the yielded coroutines you'll get the results (or + exceptions!) of the original Futures (or coroutines), in the order + in which and as soon as they complete. + + This differs from PEP 3148; the proper way to use this is: + + for f in as_completed(fs): + result = yield from f # The 'yield from' may raise. + # Use result. + + If a timeout is specified, the 'yield from' will raise + TimeoutError when the timeout occurs before all Futures are done. + + Note: The futures 'f' are not necessarily members of fs. + """ + if futures.isfuture(fs) or coroutines.iscoroutine(fs): + raise TypeError("expect a list of futures, not %s" % type(fs).__name__) + loop = loop if loop is not None else events.get_event_loop() + todo = {ensure_future(f, loop=loop) for f in set(fs)} + from .queues import Queue # Import here to avoid circular import problem. + done = Queue(loop=loop) + timeout_handle = None + + def _on_timeout(): + for f in todo: + f.remove_done_callback(_on_completion) + done.put_nowait(None) # Queue a dummy value for _wait_for_one(). + todo.clear() # Can't do todo.remove(f) in the loop. + + def _on_completion(f): + if not todo: + return # _on_timeout() was here first. 
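+        # Normal completion: move the future from the 'todo' set to the
+        # 'done' queue, where _wait_for_one() will pick it up.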
+ todo.remove(f) + done.put_nowait(f) + if not todo and timeout_handle is not None: + timeout_handle.cancel() + + @coroutine + def _wait_for_one(): + f = yield from done.get() + if f is None: + # Dummy value from _on_timeout(). + raise futures.TimeoutError + return f.result() # May raise f.exception(). + + for f in todo: + f.add_done_callback(_on_completion) + if todo and timeout is not None: + timeout_handle = loop.call_later(timeout, _on_timeout) + for _ in range(len(todo)): + yield _wait_for_one() + + +@coroutine +def sleep(delay, result=None, *, loop=None): + """Coroutine that completes after a given time (in seconds).""" + if delay == 0: + yield + return result + + if loop is None: + loop = events.get_event_loop() + future = loop.create_future() + h = future._loop.call_later(delay, + futures._set_result_unless_cancelled, + future, result) + try: + return (yield from future) + finally: + h.cancel() + + +def async_(coro_or_future, *, loop=None): + """Wrap a coroutine in a future. + + If the argument is a Future, it is returned directly. + + This function is deprecated in 3.5. Use asyncio.ensure_future() instead. + """ + + warnings.warn("asyncio.async() function is deprecated, use ensure_future()", + DeprecationWarning) + + return ensure_future(coro_or_future, loop=loop) + +# Silence DeprecationWarning: +globals()['async'] = async_ +async_.__name__ = 'async' +del async_ + + +def ensure_future(coro_or_future, *, loop=None): + """Wrap a coroutine or an awaitable in a future. + + If the argument is a Future, it is returned directly. + """ + if futures.isfuture(coro_or_future): + if loop is not None and loop is not coro_or_future._loop: + raise ValueError('loop argument must agree with Future') + return coro_or_future + elif coroutines.iscoroutine(coro_or_future): + if loop is None: + loop = events.get_event_loop() + task = loop.create_task(coro_or_future) + if task._source_traceback: + del task._source_traceback[-1] + return task + elif compat.PY35 and inspect.isawaitable(coro_or_future): + return ensure_future(_wrap_awaitable(coro_or_future), loop=loop) + else: + raise TypeError('A Future, a coroutine or an awaitable is required') + + +@coroutine +def _wrap_awaitable(awaitable): + """Helper for asyncio.ensure_future(). + + Wraps awaitable (an object with __await__) into a coroutine + that will later be wrapped in a Task by ensure_future(). + """ + return (yield from awaitable.__await__()) + + +class _GatheringFuture(futures.Future): + """Helper for gather(). + + This overrides cancel() to cancel all the children and act more + like Task.cancel(), which doesn't immediately mark itself as + cancelled. + """ + + def __init__(self, children, *, loop=None): + super().__init__(loop=loop) + self._children = children + + def cancel(self): + if self.done(): + return False + ret = False + for child in self._children: + if child.cancel(): + ret = True + return ret + + +def gather(*coros_or_futures, loop=None, return_exceptions=False): + """Return a future aggregating results from the given coroutines + or futures. + + Coroutines will be wrapped in a future and scheduled in the event + loop. They will not necessarily be scheduled in the same order as + passed in. + + All futures must share the same event loop. If all the tasks are + done successfully, the returned future's result is the list of + results (in the order of the original sequence, not necessarily + the order of results arrival). 
If *return_exceptions* is True, + exceptions in the tasks are treated the same as successful + results, and gathered in the result list; otherwise, the first + raised exception will be immediately propagated to the returned + future. + + Cancellation: if the outer Future is cancelled, all children (that + have not completed yet) are also cancelled. If any child is + cancelled, this is treated as if it raised CancelledError -- + the outer Future is *not* cancelled in this case. (This is to + prevent the cancellation of one child to cause other children to + be cancelled.) + """ + if not coros_or_futures: + if loop is None: + loop = events.get_event_loop() + outer = loop.create_future() + outer.set_result([]) + return outer + + arg_to_fut = {} + for arg in set(coros_or_futures): + if not futures.isfuture(arg): + fut = ensure_future(arg, loop=loop) + if loop is None: + loop = fut._loop + # The caller cannot control this future, the "destroy pending task" + # warning should not be emitted. + fut._log_destroy_pending = False + else: + fut = arg + if loop is None: + loop = fut._loop + elif fut._loop is not loop: + raise ValueError("futures are tied to different event loops") + arg_to_fut[arg] = fut + + children = [arg_to_fut[arg] for arg in coros_or_futures] + nchildren = len(children) + outer = _GatheringFuture(children, loop=loop) + nfinished = 0 + results = [None] * nchildren + + def _done_callback(i, fut): + nonlocal nfinished + if outer.done(): + if not fut.cancelled(): + # Mark exception retrieved. + fut.exception() + return + + if fut.cancelled(): + res = futures.CancelledError() + if not return_exceptions: + outer.set_exception(res) + return + elif fut._exception is not None: + res = fut.exception() # Mark exception retrieved. + if not return_exceptions: + outer.set_exception(res) + return + else: + res = fut._result + results[i] = res + nfinished += 1 + if nfinished == nchildren: + outer.set_result(results) + + for i, fut in enumerate(children): + fut.add_done_callback(functools.partial(_done_callback, i)) + return outer + + +def shield(arg, *, loop=None): + """Wait for a future, shielding it from cancellation. + + The statement + + res = yield from shield(something()) + + is exactly equivalent to the statement + + res = yield from something() + + *except* that if the coroutine containing it is cancelled, the + task running in something() is not cancelled. From the POV of + something(), the cancellation did not happen. But its caller is + still cancelled, so the yield-from expression still raises + CancelledError. Note: If something() is cancelled by other means + this will still cancel shield(). + + If you want to completely ignore cancellation (not recommended) + you can combine shield() with a try/except clause, as follows: + + try: + res = yield from shield(something()) + except CancelledError: + res = None + """ + inner = ensure_future(arg, loop=loop) + if inner.done(): + # Shortcut. + return inner + loop = inner._loop + outer = loop.create_future() + + def _done_callback(inner): + if outer.cancelled(): + if not inner.cancelled(): + # Mark inner's result as retrieved. + inner.exception() + return + + if inner.cancelled(): + outer.cancel() + else: + exc = inner.exception() + if exc is not None: + outer.set_exception(exc) + else: + outer.set_result(inner.result()) + + inner.add_done_callback(_done_callback) + return outer + + +def run_coroutine_threadsafe(coro, loop): + """Submit a coroutine object to a given event loop. + + Return a concurrent.futures.Future to access the result. 
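+
+    Usage sketch (some_coro() stands in for any coroutine; the loop is
+    assumed to be running in another thread):
+
+        future = run_coroutine_threadsafe(some_coro(), loop)
+        result = future.result()  # Blocks the calling thread until done.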
+ """ + if not coroutines.iscoroutine(coro): + raise TypeError('A coroutine object is required') + future = concurrent.futures.Future() + + def callback(): + try: + futures._chain_future(ensure_future(coro, loop=loop), future) + except Exception as exc: + if future.set_running_or_notify_cancel(): + future.set_exception(exc) + raise + + loop.call_soon_threadsafe(callback) + return future + + +# WeakSet containing all alive tasks. +_all_tasks = weakref.WeakSet() + +# Dictionary containing tasks that are currently active in +# all running event loops. {EventLoop: Task} +_current_tasks = {} + + +def _register_task(task): + """Register a new task in asyncio as executed by loop.""" + _all_tasks.add(task) + + +def _enter_task(loop, task): + current_task = _current_tasks.get(loop) + if current_task is not None: + raise RuntimeError(f"Cannot enter into task {task!r} while another " + f"task {current_task!r} is being executed.") + _current_tasks[loop] = task + + +def _leave_task(loop, task): + current_task = _current_tasks.get(loop) + if current_task is not task: + raise RuntimeError(f"Leaving task {task!r} does not match " + f"the current task {current_task!r}.") + del _current_tasks[loop] + + +def _unregister_task(task): + """Unregister a task.""" + _all_tasks.discard(task) + + +_py_register_task = _register_task +_py_unregister_task = _unregister_task +_py_enter_task = _enter_task +_py_leave_task = _leave_task + + +try: + from _asyncio import (_register_task, _unregister_task, + _enter_task, _leave_task, + _all_tasks, _current_tasks) +except ImportError: + pass +else: + _c_register_task = _register_task + _c_unregister_task = _unregister_task + _c_enter_task = _enter_task + _c_leave_task = _leave_task diff --git a/Lib/asyncio/test_utils.py b/Lib/asyncio/test_utils.py new file mode 100644 index 0000000000..99e3839f45 --- /dev/null +++ b/Lib/asyncio/test_utils.py @@ -0,0 +1,503 @@ +"""Utilities shared by tests.""" + +import collections +import contextlib +import io +import logging +import os +import re +import socket +import socketserver +import sys +import tempfile +import threading +import time +import unittest +import weakref + +from unittest import mock + +from http.server import HTTPServer +from wsgiref.simple_server import WSGIRequestHandler, WSGIServer + +try: + import ssl +except ImportError: # pragma: no cover + ssl = None + +from . import base_events +from . import compat +from . import events +from . import futures +from . import selectors +from . import tasks +from .coroutines import coroutine +from .log import logger + + +if sys.platform == 'win32': # pragma: no cover + from .windows_utils import socketpair +else: + from socket import socketpair # pragma: no cover + + +def dummy_ssl_context(): + if ssl is None: + return None + else: + return ssl.SSLContext(ssl.PROTOCOL_SSLv23) + + +def run_briefly(loop): + @coroutine + def once(): + pass + gen = once() + t = loop.create_task(gen) + # Don't log a warning if the task is not done after run_until_complete(). + # It occurs if the loop is stopped or if a task raises a BaseException. + t._log_destroy_pending = False + try: + loop.run_until_complete(t) + finally: + gen.close() + + +def run_until(loop, pred, timeout=30): + deadline = time.time() + timeout + while not pred(): + if timeout is not None: + timeout = deadline - time.time() + if timeout <= 0: + raise futures.TimeoutError() + loop.run_until_complete(tasks.sleep(0.001, loop=loop)) + + +def run_once(loop): + """Legacy API to run once through the event loop. 
+ + This is the recommended pattern for test code. It will poll the + selector once and run all callbacks scheduled in response to I/O + events. + """ + loop.call_soon(loop.stop) + loop.run_forever() + + +class SilentWSGIRequestHandler(WSGIRequestHandler): + + def get_stderr(self): + return io.StringIO() + + def log_message(self, format, *args): + pass + + +class SilentWSGIServer(WSGIServer): + + request_timeout = 2 + + def get_request(self): + request, client_addr = super().get_request() + request.settimeout(self.request_timeout) + return request, client_addr + + def handle_error(self, request, client_address): + pass + + +class SSLWSGIServerMixin: + + def finish_request(self, request, client_address): + # The relative location of our test directory (which + # contains the ssl key and certificate files) differs + # between the stdlib and stand-alone asyncio. + # Prefer our own if we can find it. + here = os.path.join(os.path.dirname(__file__), '..', 'tests') + if not os.path.isdir(here): + here = os.path.join(os.path.dirname(os.__file__), + 'test', 'test_asyncio') + keyfile = os.path.join(here, 'ssl_key.pem') + certfile = os.path.join(here, 'ssl_cert.pem') + context = ssl.SSLContext() + context.load_cert_chain(certfile, keyfile) + + ssock = context.wrap_socket(request, server_side=True) + try: + self.RequestHandlerClass(ssock, client_address, self) + ssock.close() + except OSError: + # maybe socket has been closed by peer + pass + + +class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer): + pass + + +def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls): + + def app(environ, start_response): + status = '200 OK' + headers = [('Content-type', 'text/plain')] + start_response(status, headers) + return [b'Test message'] + + # Run the test WSGI server in a separate thread in order not to + # interfere with event handling in the main thread + server_class = server_ssl_cls if use_ssl else server_cls + httpd = server_class(address, SilentWSGIRequestHandler) + httpd.set_app(app) + httpd.address = httpd.server_address + server_thread = threading.Thread( + target=lambda: httpd.serve_forever(poll_interval=0.05)) + server_thread.start() + try: + yield httpd + finally: + httpd.shutdown() + httpd.server_close() + server_thread.join() + + +if hasattr(socket, 'AF_UNIX'): + + class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer): + + def server_bind(self): + socketserver.UnixStreamServer.server_bind(self) + self.server_name = '127.0.0.1' + self.server_port = 80 + + + class UnixWSGIServer(UnixHTTPServer, WSGIServer): + + request_timeout = 2 + + def server_bind(self): + UnixHTTPServer.server_bind(self) + self.setup_environ() + + def get_request(self): + request, client_addr = super().get_request() + request.settimeout(self.request_timeout) + # Code in the stdlib expects that get_request + # will return a socket and a tuple (host, port). 
+            # However, this isn't true for UNIX sockets,
+            # as the second return value will be a path;
+            # hence we return some fake data sufficient
+            # to get the tests going
+            return request, ('127.0.0.1', '')
+
+
+    class SilentUnixWSGIServer(UnixWSGIServer):
+
+        def handle_error(self, request, client_address):
+            pass
+
+
+    class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
+        pass
+
+
+    def gen_unix_socket_path():
+        with tempfile.NamedTemporaryFile() as file:
+            return file.name
+
+
+    @contextlib.contextmanager
+    def unix_socket_path():
+        path = gen_unix_socket_path()
+        try:
+            yield path
+        finally:
+            try:
+                os.unlink(path)
+            except OSError:
+                pass
+
+
+    @contextlib.contextmanager
+    def run_test_unix_server(*, use_ssl=False):
+        with unix_socket_path() as path:
+            yield from _run_test_server(address=path, use_ssl=use_ssl,
+                                        server_cls=SilentUnixWSGIServer,
+                                        server_ssl_cls=UnixSSLWSGIServer)
+
+
+@contextlib.contextmanager
+def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
+    yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
+                                server_cls=SilentWSGIServer,
+                                server_ssl_cls=SSLWSGIServer)
+
+
+def make_test_protocol(base):
+    dct = {}
+    for name in dir(base):
+        if name.startswith('__') and name.endswith('__'):
+            # skip magic names
+            continue
+        dct[name] = MockCallback(return_value=None)
+    return type('TestProtocol', (base,) + base.__bases__, dct)()
+
+
+class TestSelector(selectors.BaseSelector):
+
+    def __init__(self):
+        self.keys = {}
+
+    def register(self, fileobj, events, data=None):
+        key = selectors.SelectorKey(fileobj, 0, events, data)
+        self.keys[fileobj] = key
+        return key
+
+    def unregister(self, fileobj):
+        return self.keys.pop(fileobj)
+
+    def select(self, timeout):
+        return []
+
+    def get_map(self):
+        return self.keys
+
+
+class TestLoop(base_events.BaseEventLoop):
+    """Loop for unittests.
+
+    It manages its own (fake) time directly: when something is scheduled
+    to run later, then on the next loop iteration, after all ready
+    handlers are done, the generator passed to __init__ is advanced to
+    move the clock forward.
+
+    The generator should look like this:
+
+        def gen():
+            ...
+            when = yield ...
+            ... = yield time_advance
+
+    The value received from a yield is the absolute time of the next
+    scheduled handler.  The value passed to yield is the time advance
+    used to move the loop's clock forward.
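+
+    For example, a time generator for a test expecting two handlers,
+    scheduled at absolute times 10 and 20 on the fake clock, could look
+    like this (a sketch; the comments are purely illustrative):
+
+        def gen():
+            when = yield            # receives 10, the first handler's time
+            when = yield 10         # advance 10 seconds; receives 20
+            yield 10                # advance 10 more seconds
+
+        loop = TestLoop(gen)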
+ """ + + def __init__(self, gen=None): + super().__init__() + + if gen is None: + def gen(): + yield + self._check_on_close = False + else: + self._check_on_close = True + + self._gen = gen() + next(self._gen) + self._time = 0 + self._clock_resolution = 1e-9 + self._timers = [] + self._selector = TestSelector() + + self.readers = {} + self.writers = {} + self.reset_counters() + + self._transports = weakref.WeakValueDictionary() + + def time(self): + return self._time + + def advance_time(self, advance): + """Move test time forward.""" + if advance: + self._time += advance + + def close(self): + super().close() + if self._check_on_close: + try: + self._gen.send(0) + except StopIteration: + pass + else: # pragma: no cover + raise AssertionError("Time generator is not finished") + + def _add_reader(self, fd, callback, *args): + self.readers[fd] = events.Handle(callback, args, self) + + def _remove_reader(self, fd): + self.remove_reader_count[fd] += 1 + if fd in self.readers: + del self.readers[fd] + return True + else: + return False + + def assert_reader(self, fd, callback, *args): + assert fd in self.readers, 'fd {} is not registered'.format(fd) + handle = self.readers[fd] + assert handle._callback == callback, '{!r} != {!r}'.format( + handle._callback, callback) + assert handle._args == args, '{!r} != {!r}'.format( + handle._args, args) + + def _add_writer(self, fd, callback, *args): + self.writers[fd] = events.Handle(callback, args, self) + + def _remove_writer(self, fd): + self.remove_writer_count[fd] += 1 + if fd in self.writers: + del self.writers[fd] + return True + else: + return False + + def assert_writer(self, fd, callback, *args): + assert fd in self.writers, 'fd {} is not registered'.format(fd) + handle = self.writers[fd] + assert handle._callback == callback, '{!r} != {!r}'.format( + handle._callback, callback) + assert handle._args == args, '{!r} != {!r}'.format( + handle._args, args) + + def _ensure_fd_no_transport(self, fd): + try: + transport = self._transports[fd] + except KeyError: + pass + else: + raise RuntimeError( + 'File descriptor {!r} is used by transport {!r}'.format( + fd, transport)) + + def add_reader(self, fd, callback, *args): + """Add a reader callback.""" + self._ensure_fd_no_transport(fd) + return self._add_reader(fd, callback, *args) + + def remove_reader(self, fd): + """Remove a reader callback.""" + self._ensure_fd_no_transport(fd) + return self._remove_reader(fd) + + def add_writer(self, fd, callback, *args): + """Add a writer callback..""" + self._ensure_fd_no_transport(fd) + return self._add_writer(fd, callback, *args) + + def remove_writer(self, fd): + """Remove a writer callback.""" + self._ensure_fd_no_transport(fd) + return self._remove_writer(fd) + + def reset_counters(self): + self.remove_reader_count = collections.defaultdict(int) + self.remove_writer_count = collections.defaultdict(int) + + def _run_once(self): + super()._run_once() + for when in self._timers: + advance = self._gen.send(when) + self.advance_time(advance) + self._timers = [] + + def call_at(self, when, callback, *args): + self._timers.append(when) + return super().call_at(when, callback, *args) + + def _process_events(self, event_list): + return + + def _write_to_self(self): + pass + + +def MockCallback(**kwargs): + return mock.Mock(spec=['__call__'], **kwargs) + + +class MockPattern(str): + """A regex based str with a fuzzy __eq__. + + Use this helper with 'mock.assert_called_with', or anywhere + where a regex comparison between strings is needed. 
+ + For instance: + mock_call.assert_called_with(MockPattern('spam.*ham')) + """ + def __eq__(self, other): + return bool(re.search(str(self), other, re.S)) + + +def get_function_source(func): + source = events._get_function_source(func) + if source is None: + raise ValueError("unable to get the source of %r" % (func,)) + return source + + +class TestCase(unittest.TestCase): + def set_event_loop(self, loop, *, cleanup=True): + assert loop is not None + # ensure that the event loop is passed explicitly in asyncio + events.set_event_loop(None) + if cleanup: + self.addCleanup(loop.close) + + def new_test_loop(self, gen=None): + loop = TestLoop(gen) + self.set_event_loop(loop) + return loop + + def setUp(self): + self._get_running_loop = events._get_running_loop + events._get_running_loop = lambda: None + + def tearDown(self): + events._get_running_loop = self._get_running_loop + + events.set_event_loop(None) + + # Detect CPython bug #23353: ensure that yield/yield-from is not used + # in an except block of a generator + self.assertEqual(sys.exc_info(), (None, None, None)) + + if not compat.PY34: + # Python 3.3 compatibility + def subTest(self, *args, **kwargs): + class EmptyCM: + def __enter__(self): + pass + def __exit__(self, *exc): + pass + return EmptyCM() + + +@contextlib.contextmanager +def disable_logger(): + """Context manager to disable asyncio logger. + + For example, it can be used to ignore warnings in debug mode. + """ + old_level = logger.level + try: + logger.setLevel(logging.CRITICAL+1) + yield + finally: + logger.setLevel(old_level) + + +def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM, + family=socket.AF_INET): + """Create a mock of a non-blocking socket.""" + sock = mock.MagicMock(socket.socket) + sock.proto = proto + sock.type = type + sock.family = family + sock.gettimeout.return_value = 0.0 + return sock + + +def force_legacy_ssl_support(): + return mock.patch('asyncio.sslproto._is_sslproto_available', + return_value=False) diff --git a/Lib/asyncio/transports.py b/Lib/asyncio/transports.py new file mode 100644 index 0000000000..0db0875715 --- /dev/null +++ b/Lib/asyncio/transports.py @@ -0,0 +1,306 @@ +"""Abstract Transport class.""" + +from asyncio import compat + +__all__ = ['BaseTransport', 'ReadTransport', 'WriteTransport', + 'Transport', 'DatagramTransport', 'SubprocessTransport', + ] + + +class BaseTransport: + """Base class for transports.""" + + def __init__(self, extra=None): + if extra is None: + extra = {} + self._extra = extra + + def get_extra_info(self, name, default=None): + """Get optional transport information.""" + return self._extra.get(name, default) + + def is_closing(self): + """Return True if the transport is closing or closed.""" + raise NotImplementedError + + def close(self): + """Close the transport. + + Buffered data will be flushed asynchronously. No more data + will be received. After all buffered data is flushed, the + protocol's connection_lost() method will (eventually) called + with None as its argument. + """ + raise NotImplementedError + + def set_protocol(self, protocol): + """Set a new protocol.""" + raise NotImplementedError + + def get_protocol(self): + """Return the current protocol.""" + raise NotImplementedError + + +class ReadTransport(BaseTransport): + """Interface for read-only transports.""" + + def pause_reading(self): + """Pause the receiving end. + + No data will be passed to the protocol's data_received() + method until resume_reading() is called. 
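+
+        A sketch of typical use from a protocol implementation, assuming
+        the protocol saved the transport passed to connection_made():
+
+            self.transport.pause_reading()   # stop data_received() calls
+            ...                              # drain application buffers
+            self.transport.resume_reading()  # resume delivery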
+ """ + raise NotImplementedError + + def resume_reading(self): + """Resume the receiving end. + + Data received will once again be passed to the protocol's + data_received() method. + """ + raise NotImplementedError + + +class WriteTransport(BaseTransport): + """Interface for write-only transports.""" + + def set_write_buffer_limits(self, high=None, low=None): + """Set the high- and low-water limits for write flow control. + + These two values control when to call the protocol's + pause_writing() and resume_writing() methods. If specified, + the low-water limit must be less than or equal to the + high-water limit. Neither value can be negative. + + The defaults are implementation-specific. If only the + high-water limit is given, the low-water limit defaults to an + implementation-specific value less than or equal to the + high-water limit. Setting high to zero forces low to zero as + well, and causes pause_writing() to be called whenever the + buffer becomes non-empty. Setting low to zero causes + resume_writing() to be called only once the buffer is empty. + Use of zero for either limit is generally sub-optimal as it + reduces opportunities for doing I/O and computation + concurrently. + """ + raise NotImplementedError + + def get_write_buffer_size(self): + """Return the current size of the write buffer.""" + raise NotImplementedError + + def write(self, data): + """Write some data bytes to the transport. + + This does not block; it buffers the data and arranges for it + to be sent out asynchronously. + """ + raise NotImplementedError + + def writelines(self, list_of_data): + """Write a list (or any iterable) of data bytes to the transport. + + The default implementation concatenates the arguments and + calls write() on the result. + """ + data = compat.flatten_list_bytes(list_of_data) + self.write(data) + + def write_eof(self): + """Close the write end after flushing buffered data. + + (This is like typing ^D into a UNIX program reading from stdin.) + + Data may still be received. + """ + raise NotImplementedError + + def can_write_eof(self): + """Return True if this transport supports write_eof(), False if not.""" + raise NotImplementedError + + def abort(self): + """Close the transport immediately. + + Buffered data will be lost. No more data will be received. + The protocol's connection_lost() method will (eventually) be + called with None as its argument. + """ + raise NotImplementedError + + +class Transport(ReadTransport, WriteTransport): + """Interface representing a bidirectional transport. + + There may be several implementations, but typically, the user does + not implement new transports; rather, the platform provides some + useful transports that are implemented using the platform's best + practices. + + The user never instantiates a transport directly; they call a + utility function, passing it a protocol factory and other + information necessary to create the transport and protocol. (E.g. + EventLoop.create_connection() or EventLoop.create_server().) + + The utility function will asynchronously create a transport and a + protocol and hook them up by calling the protocol's + connection_made() method, passing it the transport. + + The implementation here raises NotImplemented for every method + except writelines(), which calls write() in a loop. + """ + + +class DatagramTransport(BaseTransport): + """Interface for datagram (UDP) transports.""" + + def sendto(self, data, addr=None): + """Send data to the transport. 
+ + This does not block; it buffers the data and arranges for it + to be sent out asynchronously. + addr is target socket address. + If addr is None use target address pointed on transport creation. + """ + raise NotImplementedError + + def abort(self): + """Close the transport immediately. + + Buffered data will be lost. No more data will be received. + The protocol's connection_lost() method will (eventually) be + called with None as its argument. + """ + raise NotImplementedError + + +class SubprocessTransport(BaseTransport): + + def get_pid(self): + """Get subprocess id.""" + raise NotImplementedError + + def get_returncode(self): + """Get subprocess returncode. + + See also + http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode + """ + raise NotImplementedError + + def get_pipe_transport(self, fd): + """Get transport for pipe with number fd.""" + raise NotImplementedError + + def send_signal(self, signal): + """Send signal to subprocess. + + See also: + docs.python.org/3/library/subprocess#subprocess.Popen.send_signal + """ + raise NotImplementedError + + def terminate(self): + """Stop the subprocess. + + Alias for close() method. + + On Posix OSs the method sends SIGTERM to the subprocess. + On Windows the Win32 API function TerminateProcess() + is called to stop the subprocess. + + See also: + http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate + """ + raise NotImplementedError + + def kill(self): + """Kill the subprocess. + + On Posix OSs the function sends SIGKILL to the subprocess. + On Windows kill() is an alias for terminate(). + + See also: + http://docs.python.org/3/library/subprocess#subprocess.Popen.kill + """ + raise NotImplementedError + + +class _FlowControlMixin(Transport): + """All the logic for (write) flow control in a mix-in base class. + + The subclass must implement get_write_buffer_size(). It must call + _maybe_pause_protocol() whenever the write buffer size increases, + and _maybe_resume_protocol() whenever it decreases. It may also + override set_write_buffer_limits() (e.g. to specify different + defaults). + + The subclass constructor must call super().__init__(extra). This + will call set_write_buffer_limits(). + + The user may call set_write_buffer_limits() and + get_write_buffer_size(), and their protocol's pause_writing() and + resume_writing() may be called. 
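+
+    Subclass sketch (the _buffer attribute here is illustrative, not
+    part of this class):
+
+        class _MyWriteTransport(_FlowControlMixin, WriteTransport):
+            def get_write_buffer_size(self):
+                return len(self._buffer)
+
+            def write(self, data):
+                self._buffer.extend(data)
+                self._maybe_pause_protocol()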
+ """ + + def __init__(self, extra=None, loop=None): + super().__init__(extra) + assert loop is not None + self._loop = loop + self._protocol_paused = False + self._set_write_buffer_limits() + + def _maybe_pause_protocol(self): + size = self.get_write_buffer_size() + if size <= self._high_water: + return + if not self._protocol_paused: + self._protocol_paused = True + try: + self._protocol.pause_writing() + except Exception as exc: + self._loop.call_exception_handler({ + 'message': 'protocol.pause_writing() failed', + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + + def _maybe_resume_protocol(self): + if (self._protocol_paused and + self.get_write_buffer_size() <= self._low_water): + self._protocol_paused = False + try: + self._protocol.resume_writing() + except Exception as exc: + self._loop.call_exception_handler({ + 'message': 'protocol.resume_writing() failed', + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + + def get_write_buffer_limits(self): + return (self._low_water, self._high_water) + + def _set_write_buffer_limits(self, high=None, low=None): + if high is None: + if low is None: + high = 64*1024 + else: + high = 4*low + if low is None: + low = high // 4 + if not high >= low >= 0: + raise ValueError('high (%r) must be >= low (%r) must be >= 0' % + (high, low)) + self._high_water = high + self._low_water = low + + def set_write_buffer_limits(self, high=None, low=None): + self._set_write_buffer_limits(high=high, low=low) + self._maybe_pause_protocol() + + def get_write_buffer_size(self): + raise NotImplementedError diff --git a/Lib/asyncio/unix_events.py b/Lib/asyncio/unix_events.py new file mode 100644 index 0000000000..9db09b9d9b --- /dev/null +++ b/Lib/asyncio/unix_events.py @@ -0,0 +1,1074 @@ +"""Selector event loop for Unix with signal handling.""" + +import errno +import os +import signal +import socket +import stat +import subprocess +import sys +import threading +import warnings + + +from . import base_events +from . import base_subprocess +from . import compat +from . import constants +from . import coroutines +from . import events +from . import futures +from . import selector_events +from . import selectors +from . import transports +from .coroutines import coroutine +from .log import logger + + +__all__ = ['SelectorEventLoop', + 'AbstractChildWatcher', 'SafeChildWatcher', + 'FastChildWatcher', 'DefaultEventLoopPolicy', + ] + +if sys.platform == 'win32': # pragma: no cover + raise ImportError('Signals are not really supported on Windows') + + +def _sighandler_noop(signum, frame): + """Dummy signal handler.""" + pass + + +try: + _fspath = os.fspath +except AttributeError: + # Python 3.5 or earlier + _fspath = lambda path: path + + +class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop): + """Unix event loop. + + Adds signal handling and UNIX Domain Socket support to SelectorEventLoop. + """ + + def __init__(self, selector=None): + super().__init__(selector) + self._signal_handlers = {} + + def _socketpair(self): + return socket.socketpair() + + def close(self): + super().close() + for sig in list(self._signal_handlers): + self.remove_signal_handler(sig) + + def _process_self_data(self, data): + for signum in data: + if not signum: + # ignore null bytes written by _write_to_self() + continue + self._handle_signal(signum) + + def add_signal_handler(self, sig, callback, *args): + """Add a handler for a signal. UNIX only. + + Raise ValueError if the signal number is invalid or uncatchable. 
+ Raise RuntimeError if there is a problem setting up the handler. + """ + if (coroutines.iscoroutine(callback) + or coroutines.iscoroutinefunction(callback)): + raise TypeError("coroutines cannot be used " + "with add_signal_handler()") + self._check_signal(sig) + self._check_closed() + try: + # set_wakeup_fd() raises ValueError if this is not the + # main thread. By calling it early we ensure that an + # event loop running in another thread cannot add a signal + # handler. + signal.set_wakeup_fd(self._csock.fileno()) + except (ValueError, OSError) as exc: + raise RuntimeError(str(exc)) + + handle = events.Handle(callback, args, self) + self._signal_handlers[sig] = handle + + try: + # Register a dummy signal handler to ask Python to write the signal + # number in the wakup file descriptor. _process_self_data() will + # read signal numbers from this file descriptor to handle signals. + signal.signal(sig, _sighandler_noop) + + # Set SA_RESTART to limit EINTR occurrences. + signal.siginterrupt(sig, False) + except OSError as exc: + del self._signal_handlers[sig] + if not self._signal_handlers: + try: + signal.set_wakeup_fd(-1) + except (ValueError, OSError) as nexc: + logger.info('set_wakeup_fd(-1) failed: %s', nexc) + + if exc.errno == errno.EINVAL: + raise RuntimeError('sig {} cannot be caught'.format(sig)) + else: + raise + + def _handle_signal(self, sig): + """Internal helper that is the actual signal handler.""" + handle = self._signal_handlers.get(sig) + if handle is None: + return # Assume it's some race condition. + if handle._cancelled: + self.remove_signal_handler(sig) # Remove it properly. + else: + self._add_callback_signalsafe(handle) + + def remove_signal_handler(self, sig): + """Remove a handler for a signal. UNIX only. + + Return True if a signal handler was removed, False if not. + """ + self._check_signal(sig) + try: + del self._signal_handlers[sig] + except KeyError: + return False + + if sig == signal.SIGINT: + handler = signal.default_int_handler + else: + handler = signal.SIG_DFL + + try: + signal.signal(sig, handler) + except OSError as exc: + if exc.errno == errno.EINVAL: + raise RuntimeError('sig {} cannot be caught'.format(sig)) + else: + raise + + if not self._signal_handlers: + try: + signal.set_wakeup_fd(-1) + except (ValueError, OSError) as exc: + logger.info('set_wakeup_fd(-1) failed: %s', exc) + + return True + + def _check_signal(self, sig): + """Internal helper to validate a signal. + + Raise ValueError if the signal number is invalid or uncatchable. + Raise RuntimeError if there is a problem setting up the handler. 
+ """ + if not isinstance(sig, int): + raise TypeError('sig must be an int, not {!r}'.format(sig)) + + if not (1 <= sig < signal.NSIG): + raise ValueError( + 'sig {} out of range(1, {})'.format(sig, signal.NSIG)) + + def _make_read_pipe_transport(self, pipe, protocol, waiter=None, + extra=None): + return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra) + + def _make_write_pipe_transport(self, pipe, protocol, waiter=None, + extra=None): + return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra) + + @coroutine + def _make_subprocess_transport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + extra=None, **kwargs): + with events.get_child_watcher() as watcher: + waiter = self.create_future() + transp = _UnixSubprocessTransport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + waiter=waiter, extra=extra, + **kwargs) + + watcher.add_child_handler(transp.get_pid(), + self._child_watcher_callback, transp) + try: + yield from waiter + except Exception as exc: + # Workaround CPython bug #23353: using yield/yield-from in an + # except block of a generator doesn't clear properly + # sys.exc_info() + err = exc + else: + err = None + + if err is not None: + transp.close() + yield from transp._wait() + raise err + + return transp + + def _child_watcher_callback(self, pid, returncode, transp): + self.call_soon_threadsafe(transp._process_exited, returncode) + + @coroutine + def create_unix_connection(self, protocol_factory, path, *, + ssl=None, sock=None, + server_hostname=None): + assert server_hostname is None or isinstance(server_hostname, str) + if ssl: + if server_hostname is None: + raise ValueError( + 'you have to pass server_hostname when using ssl') + else: + if server_hostname is not None: + raise ValueError('server_hostname is only meaningful with ssl') + + if path is not None: + if sock is not None: + raise ValueError( + 'path and sock can not be specified at the same time') + + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0) + try: + sock.setblocking(False) + yield from self.sock_connect(sock, path) + except: + sock.close() + raise + + else: + if sock is None: + raise ValueError('no path and sock were specified') + if (sock.family != socket.AF_UNIX or + not base_events._is_stream_socket(sock)): + raise ValueError( + 'A UNIX Domain Stream Socket was expected, got {!r}' + .format(sock)) + sock.setblocking(False) + + transport, protocol = yield from self._create_connection_transport( + sock, protocol_factory, ssl, server_hostname) + return transport, protocol + + @coroutine + def create_unix_server(self, protocol_factory, path=None, *, + sock=None, backlog=100, ssl=None): + if isinstance(ssl, bool): + raise TypeError('ssl argument must be an SSLContext or None') + + if path is not None: + if sock is not None: + raise ValueError( + 'path and sock can not be specified at the same time') + + path = _fspath(path) + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + + # Check for abstract socket. `str` and `bytes` paths are supported. + if path[0] not in (0, '\x00'): + try: + if stat.S_ISSOCK(os.stat(path).st_mode): + os.remove(path) + except FileNotFoundError: + pass + except OSError as err: + # Directory may have permissions only to create socket. + logger.error('Unable to check or remove stale UNIX socket %r: %r', path, err) + + try: + sock.bind(path) + except OSError as exc: + sock.close() + if exc.errno == errno.EADDRINUSE: + # Let's improve the error message by adding + # with what exact address it occurs. 
+                    msg = 'Address {!r} is already in use'.format(path)
+                    raise OSError(errno.EADDRINUSE, msg) from None
+                else:
+                    raise
+            except:
+                sock.close()
+                raise
+        else:
+            if sock is None:
+                raise ValueError(
+                    'path was not specified, and no sock specified')
+
+            if (sock.family != socket.AF_UNIX or
+                    not base_events._is_stream_socket(sock)):
+                raise ValueError(
+                    'A UNIX Domain Stream Socket was expected, got {!r}'
+                    .format(sock))
+
+        server = base_events.Server(self, [sock])
+        sock.listen(backlog)
+        sock.setblocking(False)
+        self._start_serving(protocol_factory, sock, ssl, server)
+        return server
+
+
+# _set_nonblocking() is required by the pipe transport classes below;
+# it needs either os.set_blocking() (Python 3.5+) or the fcntl module.
+if hasattr(os, 'set_blocking'):
+    def _set_nonblocking(fd):
+        os.set_blocking(fd, False)
+else:
+    import fcntl
+
+    def _set_nonblocking(fd):
+        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
+        flags = flags | os.O_NONBLOCK
+        fcntl.fcntl(fd, fcntl.F_SETFL, flags)
+
+
+class _UnixReadPipeTransport(transports.ReadTransport):
+
+    max_size = 256 * 1024  # max bytes we read in one event loop iteration
+
+    def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
+        super().__init__(extra)
+        self._extra['pipe'] = pipe
+        self._loop = loop
+        self._pipe = pipe
+        self._fileno = pipe.fileno()
+        self._protocol = protocol
+        self._closing = False
+
+        mode = os.fstat(self._fileno).st_mode
+        if not (stat.S_ISFIFO(mode) or
+                stat.S_ISSOCK(mode) or
+                stat.S_ISCHR(mode)):
+            self._pipe = None
+            self._fileno = None
+            self._protocol = None
+            raise ValueError("Pipe transport is for pipes/sockets only.")
+
+        _set_nonblocking(self._fileno)
+
+        self._loop.call_soon(self._protocol.connection_made, self)
+        # only start reading when connection_made() has been called
+        self._loop.call_soon(self._loop._add_reader,
+                             self._fileno, self._read_ready)
+        if waiter is not None:
+            # only wake up the waiter when connection_made() has been called
+            self._loop.call_soon(futures._set_result_unless_cancelled,
+                                 waiter, None)
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self._pipe is None:
+            info.append('closed')
+        elif self._closing:
+            info.append('closing')
+        info.append('fd=%s' % self._fileno)
+        selector = getattr(self._loop, '_selector', None)
+        if self._pipe is not None and selector is not None:
+            polling = selector_events._test_selector_event(
+                selector,
+                self._fileno, selectors.EVENT_READ)
+            if polling:
+                info.append('polling')
+            else:
+                info.append('idle')
+        elif self._pipe is not None:
+            info.append('open')
+        else:
+            info.append('closed')
+        return '<%s>' % ' '.join(info)
+
+    def _read_ready(self):
+        try:
+            data = os.read(self._fileno, self.max_size)
+        except (BlockingIOError, InterruptedError):
+            pass
+        except OSError as exc:
+            self._fatal_error(exc, 'Fatal read error on pipe transport')
+        else:
+            if data:
+                self._protocol.data_received(data)
+            else:
+                if self._loop.get_debug():
+                    logger.info("%r was closed by peer", self)
+                self._closing = True
+                self._loop._remove_reader(self._fileno)
+                self._loop.call_soon(self._protocol.eof_received)
+                self._loop.call_soon(self._call_connection_lost, None)
+
+    def pause_reading(self):
+        self._loop._remove_reader(self._fileno)
+
+    def resume_reading(self):
+        self._loop._add_reader(self._fileno, self._read_ready)
+
+    def set_protocol(self, protocol):
+        self._protocol = protocol
+
+    def get_protocol(self):
+        return self._protocol
+
+    def is_closing(self):
+        return self._closing
+
+    def close(self):
+        if not self._closing:
+            self._close(None)
+
+    # On Python 3.3 and older, objects with a destructor part of a reference
+    # cycle are never destroyed.
It's not more the case on Python 3.4 thanks + # to the PEP 442. + if compat.PY34: + def __del__(self): + if self._pipe is not None: + warnings.warn("unclosed transport %r" % self, ResourceWarning, + source=self) + self._pipe.close() + + def _fatal_error(self, exc, message='Fatal error on pipe transport'): + # should be called by exception handler only + if (isinstance(exc, OSError) and exc.errno == errno.EIO): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + self._close(exc) + + def _close(self, exc): + self._closing = True + self._loop._remove_reader(self._fileno) + self._loop.call_soon(self._call_connection_lost, exc) + + def _call_connection_lost(self, exc): + try: + self._protocol.connection_lost(exc) + finally: + self._pipe.close() + self._pipe = None + self._protocol = None + self._loop = None + + +class _UnixWritePipeTransport(transports._FlowControlMixin, + transports.WriteTransport): + + def __init__(self, loop, pipe, protocol, waiter=None, extra=None): + super().__init__(extra, loop) + self._extra['pipe'] = pipe + self._pipe = pipe + self._fileno = pipe.fileno() + self._protocol = protocol + self._buffer = bytearray() + self._conn_lost = 0 + self._closing = False # Set when close() or write_eof() called. + + mode = os.fstat(self._fileno).st_mode + is_char = stat.S_ISCHR(mode) + is_fifo = stat.S_ISFIFO(mode) + is_socket = stat.S_ISSOCK(mode) + if not (is_char or is_fifo or is_socket): + self._pipe = None + self._fileno = None + self._protocol = None + raise ValueError("Pipe transport is only for " + "pipes, sockets and character devices") + + _set_nonblocking(self._fileno) + self._loop.call_soon(self._protocol.connection_made, self) + + # On AIX, the reader trick (to be notified when the read end of the + # socket is closed) only works for sockets. On other platforms it + # works for pipes and sockets. (Exception: OS X 10.4? Issue #19294.) + if is_socket or (is_fifo and not sys.platform.startswith("aix")): + # only start reading when connection_made() has been called + self._loop.call_soon(self._loop._add_reader, + self._fileno, self._read_ready) + + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) + + def __repr__(self): + info = [self.__class__.__name__] + if self._pipe is None: + info.append('closed') + elif self._closing: + info.append('closing') + info.append('fd=%s' % self._fileno) + selector = getattr(self._loop, '_selector', None) + if self._pipe is not None and selector is not None: + polling = selector_events._test_selector_event( + selector, + self._fileno, selectors.EVENT_WRITE) + if polling: + info.append('polling') + else: + info.append('idle') + + bufsize = self.get_write_buffer_size() + info.append('bufsize=%s' % bufsize) + elif self._pipe is not None: + info.append('open') + else: + info.append('closed') + return '<%s>' % ' '.join(info) + + def get_write_buffer_size(self): + return len(self._buffer) + + def _read_ready(self): + # Pipe was closed by peer. 
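+        # (A reader callback was registered on this fd in __init__ for
+        # sockets and, on most platforms, FIFOs: the write end of a pipe
+        # only selects as readable once the peer has closed its end.)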
+ if self._loop.get_debug(): + logger.info("%r was closed by peer", self) + if self._buffer: + self._close(BrokenPipeError()) + else: + self._close() + + def write(self, data): + assert isinstance(data, (bytes, bytearray, memoryview)), repr(data) + if isinstance(data, bytearray): + data = memoryview(data) + if not data: + return + + if self._conn_lost or self._closing: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('pipe closed by peer or ' + 'os.write(pipe, data) raised exception.') + self._conn_lost += 1 + return + + if not self._buffer: + # Attempt to send it right away first. + try: + n = os.write(self._fileno, data) + except (BlockingIOError, InterruptedError): + n = 0 + except Exception as exc: + self._conn_lost += 1 + self._fatal_error(exc, 'Fatal write error on pipe transport') + return + if n == len(data): + return + elif n > 0: + data = memoryview(data)[n:] + self._loop._add_writer(self._fileno, self._write_ready) + + self._buffer += data + self._maybe_pause_protocol() + + def _write_ready(self): + assert self._buffer, 'Data should not be empty' + + try: + n = os.write(self._fileno, self._buffer) + except (BlockingIOError, InterruptedError): + pass + except Exception as exc: + self._buffer.clear() + self._conn_lost += 1 + # Remove writer here, _fatal_error() doesn't it + # because _buffer is empty. + self._loop._remove_writer(self._fileno) + self._fatal_error(exc, 'Fatal write error on pipe transport') + else: + if n == len(self._buffer): + self._buffer.clear() + self._loop._remove_writer(self._fileno) + self._maybe_resume_protocol() # May append to buffer. + if self._closing: + self._loop._remove_reader(self._fileno) + self._call_connection_lost(None) + return + elif n > 0: + del self._buffer[:n] + + def can_write_eof(self): + return True + + def write_eof(self): + if self._closing: + return + assert self._pipe + self._closing = True + if not self._buffer: + self._loop._remove_reader(self._fileno) + self._loop.call_soon(self._call_connection_lost, None) + + def set_protocol(self, protocol): + self._protocol = protocol + + def get_protocol(self): + return self._protocol + + def is_closing(self): + return self._closing + + def close(self): + if self._pipe is not None and not self._closing: + # write_eof is all what we needed to close the write pipe + self.write_eof() + + # On Python 3.3 and older, objects with a destructor part of a reference + # cycle are never destroyed. It's not more the case on Python 3.4 thanks + # to the PEP 442. 
+    if compat.PY34:
+        def __del__(self):
+            if self._pipe is not None:
+                warnings.warn("unclosed transport %r" % self, ResourceWarning,
+                              source=self)
+                self._pipe.close()
+
+    def abort(self):
+        self._close(None)
+
+    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
+        # should be called by exception handler only
+        if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
+            if self._loop.get_debug():
+                logger.debug("%r: %s", self, message, exc_info=True)
+        else:
+            self._loop.call_exception_handler({
+                'message': message,
+                'exception': exc,
+                'transport': self,
+                'protocol': self._protocol,
+            })
+        self._close(exc)
+
+    def _close(self, exc=None):
+        self._closing = True
+        if self._buffer:
+            self._loop._remove_writer(self._fileno)
+        self._buffer.clear()
+        self._loop._remove_reader(self._fileno)
+        self._loop.call_soon(self._call_connection_lost, exc)
+
+    def _call_connection_lost(self, exc):
+        try:
+            self._protocol.connection_lost(exc)
+        finally:
+            self._pipe.close()
+            self._pipe = None
+            self._protocol = None
+            self._loop = None
+
+
+# _set_inheritable() is used by _UnixSubprocessTransport._start() below;
+# it needs either os.set_inheritable() (Python 3.4+) or the fcntl module.
+if hasattr(os, 'set_inheritable'):
+    # Python 3.4 and newer
+    _set_inheritable = os.set_inheritable
+else:
+    import fcntl
+
+    def _set_inheritable(fd, inheritable):
+        cloexec_flag = getattr(fcntl, 'FD_CLOEXEC', 1)
+
+        old = fcntl.fcntl(fd, fcntl.F_GETFD)
+        if not inheritable:
+            fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
+        else:
+            fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
+
+
+class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
+
+    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
+        stdin_w = None
+        if stdin == subprocess.PIPE:
+            # Use a socket pair for stdin, since not all platforms
+            # support selecting read events on the write end of a
+            # socket (which we use in order to detect closing of the
+            # other end).  Notably this is needed on AIX, and works
+            # just fine on other platforms.
+            stdin, stdin_w = self._loop._socketpair()
+
+            # Mark the write end of the stdin pipe as non-inheritable,
+            # needed by close_fds=False on Python 3.3 and older
+            # (Python 3.4 implements the PEP 446, socketpair returns
+            # non-inheritable sockets)
+            _set_inheritable(stdin_w.fileno(), False)
+        self._proc = subprocess.Popen(
+            args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
+            universal_newlines=False, bufsize=bufsize, **kwargs)
+        if stdin_w is not None:
+            stdin.close()
+            self._proc.stdin = open(stdin_w.detach(), 'wb', buffering=bufsize)
+
+
+class AbstractChildWatcher:
+    """Abstract base class for monitoring child processes.
+
+    Objects derived from this class monitor a collection of subprocesses and
+    report their termination or interruption by a signal.
+
+    New callbacks are registered with .add_child_handler(). Starting a new
+    process must be done within a 'with' block to allow the watcher to suspend
+    its activity until the new process is fully registered (this is needed to
+    prevent a race condition in some implementations).
+
+    Example:
+        with watcher:
+            proc = subprocess.Popen("sleep 1")
+            watcher.add_child_handler(proc.pid, callback)
+
+    Notes:
+        Implementations of this class must be thread-safe.
+
+        Since child watcher objects may catch the SIGCHLD signal and call
+        waitpid(-1), there should be only one active object per process.
+    """
+
+    def add_child_handler(self, pid, callback, *args):
+        """Register a new child handler.
+
+        Arrange for callback(pid, returncode, *args) to be called when
+        process 'pid' terminates.
Specifying another callback for the same + process replaces the previous handler. + + Note: callback() must be thread-safe. + """ + raise NotImplementedError() + + def remove_child_handler(self, pid): + """Removes the handler for process 'pid'. + + The function returns True if the handler was successfully removed, + False if there was nothing to remove.""" + + raise NotImplementedError() + + def attach_loop(self, loop): + """Attach the watcher to an event loop. + + If the watcher was previously attached to an event loop, then it is + first detached before attaching to the new loop. + + Note: loop may be None. + """ + raise NotImplementedError() + + def close(self): + """Close the watcher. + + This must be called to make sure that any underlying resource is freed. + """ + raise NotImplementedError() + + def __enter__(self): + """Enter the watcher's context and allow starting new processes + + This function must return self""" + raise NotImplementedError() + + def __exit__(self, a, b, c): + """Exit the watcher's context""" + raise NotImplementedError() + + +class BaseChildWatcher(AbstractChildWatcher): + + def __init__(self): + self._loop = None + self._callbacks = {} + + def close(self): + self.attach_loop(None) + + def _do_waitpid(self, expected_pid): + raise NotImplementedError() + + def _do_waitpid_all(self): + raise NotImplementedError() + + def attach_loop(self, loop): + assert loop is None or isinstance(loop, events.AbstractEventLoop) + + if self._loop is not None and loop is None and self._callbacks: + warnings.warn( + 'A loop is being detached ' + 'from a child watcher with pending handlers', + RuntimeWarning) + + if self._loop is not None: + self._loop.remove_signal_handler(signal.SIGCHLD) + + self._loop = loop + if loop is not None: + loop.add_signal_handler(signal.SIGCHLD, self._sig_chld) + + # Prevent a race condition in case a child terminated + # during the switch. + self._do_waitpid_all() + + def _sig_chld(self): + try: + self._do_waitpid_all() + except Exception as exc: + # self._loop should always be available here + # as '_sig_chld' is added as a signal handler + # in 'attach_loop' + self._loop.call_exception_handler({ + 'message': 'Unknown exception in SIGCHLD handler', + 'exception': exc, + }) + + def _compute_returncode(self, status): + if os.WIFSIGNALED(status): + # The child process died because of a signal. + return -os.WTERMSIG(status) + elif os.WIFEXITED(status): + # The child process exited (e.g sys.exit()). + return os.WEXITSTATUS(status) + else: + # The child exited, but we don't understand its status. + # This shouldn't happen, but if it does, let's just + # return that status; perhaps that helps debug it. + return status + + +class SafeChildWatcher(BaseChildWatcher): + """'Safe' child watcher implementation. + + This implementation avoids disrupting other code spawning processes by + polling explicitly each process in the SIGCHLD handler instead of calling + os.waitpid(-1). + + This is a safe solution but it has a significant overhead when handling a + big number of children (O(n) each time SIGCHLD is raised) + """ + + def close(self): + self._callbacks.clear() + super().close() + + def __enter__(self): + return self + + def __exit__(self, a, b, c): + pass + + def add_child_handler(self, pid, callback, *args): + if self._loop is None: + raise RuntimeError( + "Cannot add child handler, " + "the child watcher does not have a loop attached") + + self._callbacks[pid] = (callback, args) + + # Prevent a race condition in case the child is already terminated. 
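+        # (If the child already exited, the waitpid() call below reaps it
+        # right away and the callback is still invoked.)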
+        self._do_waitpid(pid)
+
+    def remove_child_handler(self, pid):
+        try:
+            del self._callbacks[pid]
+            return True
+        except KeyError:
+            return False
+
+    def _do_waitpid_all(self):
+
+        for pid in list(self._callbacks):
+            self._do_waitpid(pid)
+
+    def _do_waitpid(self, expected_pid):
+        assert expected_pid > 0
+
+        try:
+            pid, status = os.waitpid(expected_pid, os.WNOHANG)
+        except ChildProcessError:
+            # The child process is already reaped
+            # (may happen if waitpid() is called elsewhere).
+            pid = expected_pid
+            returncode = 255
+            logger.warning(
+                "Unknown child process pid %d, will report returncode 255",
+                pid)
+        else:
+            if pid == 0:
+                # The child process is still alive.
+                return
+
+            returncode = self._compute_returncode(status)
+            if self._loop.get_debug():
+                logger.debug('process %s exited with returncode %s',
+                             expected_pid, returncode)
+
+        try:
+            callback, args = self._callbacks.pop(pid)
+        except KeyError:  # pragma: no cover
+            # May happen if .remove_child_handler() is called
+            # after os.waitpid() returns.
+            if self._loop.get_debug():
+                logger.warning("Child watcher got an unexpected pid: %r",
+                               pid, exc_info=True)
+        else:
+            callback(pid, returncode, *args)
+
+
+class FastChildWatcher(BaseChildWatcher):
+    """'Fast' child watcher implementation.
+
+    This implementation reaps every terminated process by calling
+    os.waitpid(-1) directly, possibly breaking other code spawning processes
+    and waiting for their termination.
+
+    There is no noticeable overhead when handling a big number of children
+    (O(1) each time a child terminates).
+    """
+    def __init__(self):
+        super().__init__()
+        self._lock = threading.Lock()
+        self._zombies = {}
+        self._forks = 0
+
+    def close(self):
+        self._callbacks.clear()
+        self._zombies.clear()
+        super().close()
+
+    def __enter__(self):
+        with self._lock:
+            self._forks += 1
+
+            return self
+
+    def __exit__(self, a, b, c):
+        with self._lock:
+            self._forks -= 1
+
+            if self._forks or not self._zombies:
+                return
+
+            collateral_victims = str(self._zombies)
+            self._zombies.clear()
+
+        logger.warning(
+            "Caught subprocesses termination from unknown pids: %s",
+            collateral_victims)
+
+    def add_child_handler(self, pid, callback, *args):
+        assert self._forks, "Must use the context manager"
+
+        if self._loop is None:
+            raise RuntimeError(
+                "Cannot add child handler, "
+                "the child watcher does not have a loop attached")
+
+        with self._lock:
+            try:
+                returncode = self._zombies.pop(pid)
+            except KeyError:
+                # The child is running.
+                self._callbacks[pid] = callback, args
+                return
+
+        # The child is dead already. We can fire the callback.
+        callback(pid, returncode, *args)
+
+    def remove_child_handler(self, pid):
+        try:
+            del self._callbacks[pid]
+            return True
+        except KeyError:
+            return False
+
+    def _do_waitpid_all(self):
+        # Because of signal coalescing, we must keep calling waitpid() as
+        # long as we're able to reap a child.
+        while True:
+            try:
+                pid, status = os.waitpid(-1, os.WNOHANG)
+            except ChildProcessError:
+                # No more child processes exist.
+                return
+            else:
+                if pid == 0:
+                    # A child process is still alive.
+                    return
+
+                returncode = self._compute_returncode(status)
+
+            with self._lock:
+                try:
+                    callback, args = self._callbacks.pop(pid)
+                except KeyError:
+                    # unknown child
+                    if self._forks:
+                        # It may not be registered yet.
+ self._zombies[pid] = returncode + if self._loop.get_debug(): + logger.debug('unknown process %s exited ' + 'with returncode %s', + pid, returncode) + continue + callback = None + else: + if self._loop.get_debug(): + logger.debug('process %s exited with returncode %s', + pid, returncode) + + if callback is None: + logger.warning( + "Caught subprocess termination from unknown pid: " + "%d -> %d", pid, returncode) + else: + callback(pid, returncode, *args) + + +class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy): + """UNIX event loop policy with a watcher for child processes.""" + _loop_factory = _UnixSelectorEventLoop + + def __init__(self): + super().__init__() + self._watcher = None + + def _init_watcher(self): + with events._lock: + if self._watcher is None: # pragma: no branch + self._watcher = SafeChildWatcher() + if isinstance(threading.current_thread(), + threading._MainThread): + self._watcher.attach_loop(self._local._loop) + + def set_event_loop(self, loop): + """Set the event loop. + + As a side effect, if a child watcher was set before, then calling + .set_event_loop() from the main thread will call .attach_loop(loop) on + the child watcher. + """ + + super().set_event_loop(loop) + + if self._watcher is not None and \ + isinstance(threading.current_thread(), threading._MainThread): + self._watcher.attach_loop(loop) + + def get_child_watcher(self): + """Get the watcher for child processes. + + If not yet set, a SafeChildWatcher object is automatically created. + """ + if self._watcher is None: + self._init_watcher() + + return self._watcher + + def set_child_watcher(self, watcher): + """Set the watcher for child processes.""" + + assert watcher is None or isinstance(watcher, AbstractChildWatcher) + + if self._watcher is not None: + self._watcher.close() + + self._watcher = watcher + +SelectorEventLoop = _UnixSelectorEventLoop +DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy diff --git a/Lib/asyncio/windows_events.py b/Lib/asyncio/windows_events.py new file mode 100644 index 0000000000..2c68bc526a --- /dev/null +++ b/Lib/asyncio/windows_events.py @@ -0,0 +1,780 @@ +"""Selector and proactor event loops for Windows.""" + +import _winapi +import errno +import math +import socket +import struct +import weakref + +from . import events +from . import base_subprocess +from . import futures +from . import proactor_events +from . import selector_events +from . import tasks +from . import windows_utils +# XXX RustPython TODO: _overlapped +# from . import _overlapped +from .coroutines import coroutine +from .log import logger + + +__all__ = ['SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor', + 'DefaultEventLoopPolicy', + ] + + +NULL = 0 +INFINITE = 0xffffffff +ERROR_CONNECTION_REFUSED = 1225 +ERROR_CONNECTION_ABORTED = 1236 + +# Initial delay in seconds for connect_pipe() before retrying to connect +CONNECT_PIPE_INIT_DELAY = 0.001 + +# Maximum delay in seconds for connect_pipe() before retrying to connect +CONNECT_PIPE_MAX_DELAY = 0.100 + + +class _OverlappedFuture(futures.Future): + """Subclass of Future which represents an overlapped operation. + + Cancelling it will immediately cancel the overlapped operation. 
+ """ + + def __init__(self, ov, *, loop=None): + super().__init__(loop=loop) + if self._source_traceback: + del self._source_traceback[-1] + self._ov = ov + + def _repr_info(self): + info = super()._repr_info() + if self._ov is not None: + state = 'pending' if self._ov.pending else 'completed' + info.insert(1, 'overlapped=<%s, %#x>' % (state, self._ov.address)) + return info + + def _cancel_overlapped(self): + if self._ov is None: + return + try: + self._ov.cancel() + except OSError as exc: + context = { + 'message': 'Cancelling an overlapped future failed', + 'exception': exc, + 'future': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + self._ov = None + + def cancel(self): + self._cancel_overlapped() + return super().cancel() + + def set_exception(self, exception): + super().set_exception(exception) + self._cancel_overlapped() + + def set_result(self, result): + super().set_result(result) + self._ov = None + + +class _BaseWaitHandleFuture(futures.Future): + """Subclass of Future which represents a wait handle.""" + + def __init__(self, ov, handle, wait_handle, *, loop=None): + super().__init__(loop=loop) + if self._source_traceback: + del self._source_traceback[-1] + # Keep a reference to the Overlapped object to keep it alive until the + # wait is unregistered + self._ov = ov + self._handle = handle + self._wait_handle = wait_handle + + # Should we call UnregisterWaitEx() if the wait completes + # or is cancelled? + self._registered = True + + def _poll(self): + # non-blocking wait: use a timeout of 0 millisecond + return (_winapi.WaitForSingleObject(self._handle, 0) == + _winapi.WAIT_OBJECT_0) + + def _repr_info(self): + info = super()._repr_info() + info.append('handle=%#x' % self._handle) + if self._handle is not None: + state = 'signaled' if self._poll() else 'waiting' + info.append(state) + if self._wait_handle is not None: + info.append('wait_handle=%#x' % self._wait_handle) + return info + + def _unregister_wait_cb(self, fut): + # The wait was unregistered: it's not safe to destroy the Overlapped + # object + self._ov = None + + def _unregister_wait(self): + if not self._registered: + return + self._registered = False + + wait_handle = self._wait_handle + self._wait_handle = None + try: + _overlapped.UnregisterWait(wait_handle) + except OSError as exc: + if exc.winerror != _overlapped.ERROR_IO_PENDING: + context = { + 'message': 'Failed to unregister the wait handle', + 'exception': exc, + 'future': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + return + # ERROR_IO_PENDING means that the unregister is pending + + self._unregister_wait_cb(None) + + def cancel(self): + self._unregister_wait() + return super().cancel() + + def set_exception(self, exception): + self._unregister_wait() + super().set_exception(exception) + + def set_result(self, result): + self._unregister_wait() + super().set_result(result) + + +class _WaitCancelFuture(_BaseWaitHandleFuture): + """Subclass of Future which represents a wait for the cancellation of a + _WaitHandleFuture using an event. 
+ """ + + def __init__(self, ov, event, wait_handle, *, loop=None): + super().__init__(ov, event, wait_handle, loop=loop) + + self._done_callback = None + + def cancel(self): + raise RuntimeError("_WaitCancelFuture must not be cancelled") + + def set_result(self, result): + super().set_result(result) + if self._done_callback is not None: + self._done_callback(self) + + def set_exception(self, exception): + super().set_exception(exception) + if self._done_callback is not None: + self._done_callback(self) + + +class _WaitHandleFuture(_BaseWaitHandleFuture): + def __init__(self, ov, handle, wait_handle, proactor, *, loop=None): + super().__init__(ov, handle, wait_handle, loop=loop) + self._proactor = proactor + self._unregister_proactor = True + self._event = _overlapped.CreateEvent(None, True, False, None) + self._event_fut = None + + def _unregister_wait_cb(self, fut): + if self._event is not None: + _winapi.CloseHandle(self._event) + self._event = None + self._event_fut = None + + # If the wait was cancelled, the wait may never be signalled, so + # it's required to unregister it. Otherwise, IocpProactor.close() will + # wait forever for an event which will never come. + # + # If the IocpProactor already received the event, it's safe to call + # _unregister() because we kept a reference to the Overlapped object + # which is used as a unique key. + self._proactor._unregister(self._ov) + self._proactor = None + + super()._unregister_wait_cb(fut) + + def _unregister_wait(self): + if not self._registered: + return + self._registered = False + + wait_handle = self._wait_handle + self._wait_handle = None + try: + _overlapped.UnregisterWaitEx(wait_handle, self._event) + except OSError as exc: + if exc.winerror != _overlapped.ERROR_IO_PENDING: + context = { + 'message': 'Failed to unregister the wait handle', + 'exception': exc, + 'future': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + return + # ERROR_IO_PENDING is not an error, the wait was unregistered + + self._event_fut = self._proactor._wait_cancel(self._event, + self._unregister_wait_cb) + + +class PipeServer(object): + """Class representing a pipe server. + + This is much like a bound, listening socket. + """ + def __init__(self, address): + self._address = address + self._free_instances = weakref.WeakSet() + # initialize the pipe attribute before calling _server_pipe_handle() + # because this function can raise an exception and the destructor calls + # the close() method + self._pipe = None + self._accept_pipe_future = None + self._pipe = self._server_pipe_handle(True) + + def _get_unconnected_pipe(self): + # Create new instance and return previous one. This ensures + # that (until the server is closed) there is always at least + # one pipe handle for address. Therefore if a client attempt + # to connect it will not fail with FileNotFoundError. + tmp, self._pipe = self._pipe, self._server_pipe_handle(False) + return tmp + + def _server_pipe_handle(self, first): + # Return a wrapper for a new pipe handle. 
+ if self.closed(): + return None + flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED + if first: + flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE + h = _winapi.CreateNamedPipe( + self._address, flags, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + _winapi.PIPE_UNLIMITED_INSTANCES, + windows_utils.BUFSIZE, windows_utils.BUFSIZE, + _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL) + pipe = windows_utils.PipeHandle(h) + self._free_instances.add(pipe) + return pipe + + def closed(self): + return (self._address is None) + + def close(self): + if self._accept_pipe_future is not None: + self._accept_pipe_future.cancel() + self._accept_pipe_future = None + # Close all instances which have not been connected to by a client. + if self._address is not None: + for pipe in self._free_instances: + pipe.close() + self._pipe = None + self._address = None + self._free_instances.clear() + + __del__ = close + + +class _WindowsSelectorEventLoop(selector_events.BaseSelectorEventLoop): + """Windows version of selector event loop.""" + + def _socketpair(self): + return windows_utils.socketpair() + + +class ProactorEventLoop(proactor_events.BaseProactorEventLoop): + """Windows version of proactor event loop using IOCP.""" + + def __init__(self, proactor=None): + if proactor is None: + proactor = IocpProactor() + super().__init__(proactor) + + def _socketpair(self): + return windows_utils.socketpair() + + @coroutine + def create_pipe_connection(self, protocol_factory, address): + f = self._proactor.connect_pipe(address) + pipe = yield from f + protocol = protocol_factory() + trans = self._make_duplex_pipe_transport(pipe, protocol, + extra={'addr': address}) + return trans, protocol + + @coroutine + def start_serving_pipe(self, protocol_factory, address): + server = PipeServer(address) + + def loop_accept_pipe(f=None): + pipe = None + try: + if f: + pipe = f.result() + server._free_instances.discard(pipe) + + if server.closed(): + # A client connected before the server was closed: + # drop the client (close the pipe) and exit + pipe.close() + return + + protocol = protocol_factory() + self._make_duplex_pipe_transport( + pipe, protocol, extra={'addr': address}) + + pipe = server._get_unconnected_pipe() + if pipe is None: + return + + f = self._proactor.accept_pipe(pipe) + except OSError as exc: + if pipe and pipe.fileno() != -1: + self.call_exception_handler({ + 'message': 'Pipe accept failed', + 'exception': exc, + 'pipe': pipe, + }) + pipe.close() + elif self._debug: + logger.warning("Accept pipe failed on pipe %r", + pipe, exc_info=True) + except futures.CancelledError: + if pipe: + pipe.close() + else: + server._accept_pipe_future = f + f.add_done_callback(loop_accept_pipe) + + self.call_soon(loop_accept_pipe) + return [server] + + @coroutine + def _make_subprocess_transport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + extra=None, **kwargs): + waiter = self.create_future() + transp = _WindowsSubprocessTransport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + waiter=waiter, extra=extra, + **kwargs) + try: + yield from waiter + except Exception as exc: + # Workaround CPython bug #23353: using yield/yield-from in an + # except block of a generator doesn't clear properly sys.exc_info() + err = exc + else: + err = None + + if err is not None: + transp.close() + yield from transp._wait() + raise err + + return transp + + +class IocpProactor: + """Proactor implementation using IOCP.""" + + def __init__(self, concurrency=0xffffffff): + 
self._loop = None + self._results = [] + self._iocp = _overlapped.CreateIoCompletionPort( + _overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency) + self._cache = {} + self._registered = weakref.WeakSet() + self._unregistered = [] + self._stopped_serving = weakref.WeakSet() + + def __repr__(self): + return ('<%s overlapped#=%s result#=%s>' + % (self.__class__.__name__, len(self._cache), + len(self._results))) + + def set_loop(self, loop): + self._loop = loop + + def select(self, timeout=None): + if not self._results: + self._poll(timeout) + tmp = self._results + self._results = [] + return tmp + + def _result(self, value): + fut = self._loop.create_future() + fut.set_result(value) + return fut + + def recv(self, conn, nbytes, flags=0): + self._register_with_iocp(conn) + ov = _overlapped.Overlapped(NULL) + try: + if isinstance(conn, socket.socket): + ov.WSARecv(conn.fileno(), nbytes, flags) + else: + ov.ReadFile(conn.fileno(), nbytes) + except BrokenPipeError: + return self._result(b'') + + def finish_recv(trans, key, ov): + try: + return ov.getresult() + except OSError as exc: + if exc.winerror == _overlapped.ERROR_NETNAME_DELETED: + raise ConnectionResetError(*exc.args) + else: + raise + + return self._register(ov, conn, finish_recv) + + def send(self, conn, buf, flags=0): + self._register_with_iocp(conn) + ov = _overlapped.Overlapped(NULL) + if isinstance(conn, socket.socket): + ov.WSASend(conn.fileno(), buf, flags) + else: + ov.WriteFile(conn.fileno(), buf) + + def finish_send(trans, key, ov): + try: + return ov.getresult() + except OSError as exc: + if exc.winerror == _overlapped.ERROR_NETNAME_DELETED: + raise ConnectionResetError(*exc.args) + else: + raise + + return self._register(ov, conn, finish_send) + + def accept(self, listener): + self._register_with_iocp(listener) + conn = self._get_accept_socket(listener.family) + ov = _overlapped.Overlapped(NULL) + ov.AcceptEx(listener.fileno(), conn.fileno()) + + def finish_accept(trans, key, ov): + ov.getresult() + # Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work. + buf = struct.pack('@P', listener.fileno()) + conn.setsockopt(socket.SOL_SOCKET, + _overlapped.SO_UPDATE_ACCEPT_CONTEXT, buf) + conn.settimeout(listener.gettimeout()) + return conn, conn.getpeername() + + @coroutine + def accept_coro(future, conn): + # Coroutine closing the accept socket if the future is cancelled + try: + yield from future + except futures.CancelledError: + conn.close() + raise + + future = self._register(ov, listener, finish_accept) + coro = accept_coro(future, conn) + tasks.ensure_future(coro, loop=self._loop) + return future + + def connect(self, conn, address): + self._register_with_iocp(conn) + # The socket needs to be locally bound before we call ConnectEx(). + try: + _overlapped.BindLocal(conn.fileno(), conn.family) + except OSError as e: + if e.winerror != errno.WSAEINVAL: + raise + # Probably already locally bound; check using getsockname(). + if conn.getsockname()[1] == 0: + raise + ov = _overlapped.Overlapped(NULL) + ov.ConnectEx(conn.fileno(), address) + + def finish_connect(trans, key, ov): + ov.getresult() + # Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work. 
+            conn.setsockopt(socket.SOL_SOCKET,
+                            _overlapped.SO_UPDATE_CONNECT_CONTEXT, 0)
+            return conn
+
+        return self._register(ov, conn, finish_connect)
+
+    def accept_pipe(self, pipe):
+        self._register_with_iocp(pipe)
+        ov = _overlapped.Overlapped(NULL)
+        connected = ov.ConnectNamedPipe(pipe.fileno())
+
+        if connected:
+            # ConnectNamedPipe() failed with ERROR_PIPE_CONNECTED which means
+            # that the pipe is connected. There is no need to wait for the
+            # completion of the connection.
+            return self._result(pipe)
+
+        def finish_accept_pipe(trans, key, ov):
+            ov.getresult()
+            return pipe
+
+        return self._register(ov, pipe, finish_accept_pipe)
+
+    @coroutine
+    def connect_pipe(self, address):
+        delay = CONNECT_PIPE_INIT_DELAY
+        while True:
+            # Unfortunately there is no way to do an overlapped connect to
+            # a pipe.  Call CreateFile() in a loop until it doesn't fail with
+            # ERROR_PIPE_BUSY
+            try:
+                handle = _overlapped.ConnectPipe(address)
+                break
+            except OSError as exc:
+                if exc.winerror != _overlapped.ERROR_PIPE_BUSY:
+                    raise
+
+            # ConnectPipe() failed with ERROR_PIPE_BUSY: retry later
+            delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)
+            yield from tasks.sleep(delay, loop=self._loop)
+
+        return windows_utils.PipeHandle(handle)
+
+    def wait_for_handle(self, handle, timeout=None):
+        """Wait for a handle.
+
+        Return a Future object. The result of the future is True if the wait
+        completed, or False if the wait did not complete (on timeout).
+        """
+        return self._wait_for_handle(handle, timeout, False)
+
+    def _wait_cancel(self, event, done_callback):
+        fut = self._wait_for_handle(event, None, True)
+        # add_done_callback() cannot be used because the wait may only complete
+        # in IocpProactor.close(), while the event loop is not running.
+        fut._done_callback = done_callback
+        return fut
+
+    def _wait_for_handle(self, handle, timeout, _is_cancel):
+        if timeout is None:
+            ms = _winapi.INFINITE
+        else:
+            # RegisterWaitForSingleObject() has a resolution of 1 millisecond,
+            # round away from zero to wait *at least* timeout seconds.
+            ms = math.ceil(timeout * 1e3)
+
+        # We only create ov so we can use ov.address as a key for the cache.
+        ov = _overlapped.Overlapped(NULL)
+        wait_handle = _overlapped.RegisterWaitWithQueue(
+            handle, self._iocp, ov.address, ms)
+        if _is_cancel:
+            f = _WaitCancelFuture(ov, handle, wait_handle, loop=self._loop)
+        else:
+            f = _WaitHandleFuture(ov, handle, wait_handle, self,
+                                  loop=self._loop)
+        if f._source_traceback:
+            del f._source_traceback[-1]
+
+        def finish_wait_for_handle(trans, key, ov):
+            # Note that this second wait means that we should only use
+            # this with handle types where a successful wait has no
+            # effect. So events or processes are all right, but locks
+            # or semaphores are not. Also note if the handle is
+            # signalled and then quickly reset, then we may return
+            # False even though we have not timed out.
+            return f._poll()
+
+        self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle)
+        return f
+
+    def _register_with_iocp(self, obj):
+        # To get notifications of finished ops on this object sent to the
+        # completion port, we must register the handle.
+        if obj not in self._registered:
+            self._registered.add(obj)
+            _overlapped.CreateIoCompletionPort(obj.fileno(), self._iocp, 0, 0)
+            # XXX We could also use SetFileCompletionNotificationModes()
+            # to avoid sending notifications to completion port of ops
+            # that succeed immediately.
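+
+    # Illustrative sketch (not upstream code; main() is a hypothetical user
+    # coroutine): applications normally reach this proactor through
+    # ProactorEventLoop rather than by calling recv()/send()/accept()
+    # directly:
+    #
+    #     import asyncio
+    #     from asyncio.windows_events import ProactorEventLoop
+    #
+    #     loop = ProactorEventLoop()      # wraps a fresh IocpProactor()
+    #     asyncio.set_event_loop(loop)
+    #     loop.run_until_complete(main())
+    #
+    # Each I/O method above wraps its OVERLAPPED operation in an
+    # _OverlappedFuture via _register() below; _poll() then matches
+    # completion packets back to those futures through self._cache.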
+
+    def _register(self, ov, obj, callback):
+        # Return a future which will be set with the result of the
+        # operation when it completes. The future's value is actually
+        # the value returned by callback().
+        f = _OverlappedFuture(ov, loop=self._loop)
+        if f._source_traceback:
+            del f._source_traceback[-1]
+        if not ov.pending:
+            # The operation has completed, so no need to postpone the
+            # work. We cannot take this short cut if we need the
+            # NumberOfBytes, CompletionKey values returned by
+            # PostQueuedCompletionStatus().
+            try:
+                value = callback(None, None, ov)
+            except OSError as e:
+                f.set_exception(e)
+            else:
+                f.set_result(value)
+            # Even if GetOverlappedResult() was called, we have to wait for the
+            # notification of the completion in GetQueuedCompletionStatus().
+            # Register the overlapped operation to keep a reference to the
+            # OVERLAPPED object, otherwise the memory is freed and Windows may
+            # read uninitialized memory.
+
+        # Register the overlapped operation for later. Note that
+        # we only store obj to prevent it from being garbage
+        # collected too early.
+        self._cache[ov.address] = (f, ov, obj, callback)
+        return f
+
+    def _unregister(self, ov):
+        """Unregister an overlapped object.
+
+        Call this method when its future has been cancelled. The event can
+        already be signalled (pending in the proactor event queue). It is also
+        safe if the event is never signalled (because it was cancelled).
+        """
+        self._unregistered.append(ov)
+
+    def _get_accept_socket(self, family):
+        s = socket.socket(family)
+        s.settimeout(0)
+        return s
+
+    def _poll(self, timeout=None):
+        if timeout is None:
+            ms = INFINITE
+        elif timeout < 0:
+            raise ValueError("negative timeout")
+        else:
+            # GetQueuedCompletionStatus() has a resolution of 1 millisecond,
+            # round away from zero to wait *at least* timeout seconds.
+            ms = math.ceil(timeout * 1e3)
+            if ms >= INFINITE:
+                raise ValueError("timeout too big")
+
+        while True:
+            status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms)
+            if status is None:
+                break
+            ms = 0
+
+            err, transferred, key, address = status
+            try:
+                f, ov, obj, callback = self._cache.pop(address)
+            except KeyError:
+                if self._loop.get_debug():
+                    self._loop.call_exception_handler({
+                        'message': ('GetQueuedCompletionStatus() returned an '
+                                    'unexpected event'),
+                        'status': ('err=%s transferred=%s key=%#x address=%#x'
+                                   % (err, transferred, key, address)),
+                    })
+
+                # key is either zero, or it is used to return a pipe
+                # handle which should be closed to avoid a leak.
+                if key not in (0, _overlapped.INVALID_HANDLE_VALUE):
+                    _winapi.CloseHandle(key)
+                continue
+
+            if obj in self._stopped_serving:
+                f.cancel()
+            # Don't call the callback if _register() already read the result or
+            # if the overlapped has been cancelled
+            elif not f.done():
+                try:
+                    value = callback(transferred, key, ov)
+                except OSError as e:
+                    f.set_exception(e)
+                    self._results.append(f)
+                else:
+                    f.set_result(value)
+                    self._results.append(f)
+
+        # Remove unregistered futures
+        for ov in self._unregistered:
+            self._cache.pop(ov.address, None)
+        self._unregistered.clear()
+
+    def _stop_serving(self, obj):
+        # obj is a socket or pipe handle. It will be closed in
+        # BaseProactorEventLoop._stop_serving() which will make any
+        # pending operations fail quickly.
+        self._stopped_serving.add(obj)
+
+    def close(self):
+        # Cancel remaining registered operations.
+ for address, (fut, ov, obj, callback) in list(self._cache.items()): + if fut.cancelled(): + # Nothing to do with cancelled futures + pass + elif isinstance(fut, _WaitCancelFuture): + # _WaitCancelFuture must not be cancelled + pass + else: + try: + fut.cancel() + except OSError as exc: + if self._loop is not None: + context = { + 'message': 'Cancelling a future failed', + 'exception': exc, + 'future': fut, + } + if fut._source_traceback: + context['source_traceback'] = fut._source_traceback + self._loop.call_exception_handler(context) + + while self._cache: + if not self._poll(1): + logger.debug('taking long time to close proactor') + + self._results = [] + if self._iocp is not None: + _winapi.CloseHandle(self._iocp) + self._iocp = None + + def __del__(self): + self.close() + + +class _WindowsSubprocessTransport(base_subprocess.BaseSubprocessTransport): + + def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs): + self._proc = windows_utils.Popen( + args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr, + bufsize=bufsize, **kwargs) + + def callback(f): + returncode = self._proc.poll() + self._process_exited(returncode) + + f = self._loop._proactor.wait_for_handle(int(self._proc._handle)) + f.add_done_callback(callback) + + +SelectorEventLoop = _WindowsSelectorEventLoop + + +class _WindowsDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy): + _loop_factory = SelectorEventLoop + + +DefaultEventLoopPolicy = _WindowsDefaultEventLoopPolicy diff --git a/Lib/asyncio/windows_utils.py b/Lib/asyncio/windows_utils.py new file mode 100644 index 0000000000..de7b71d809 --- /dev/null +++ b/Lib/asyncio/windows_utils.py @@ -0,0 +1,225 @@ +""" +Various Windows specific bits and pieces +""" + +import sys + +if sys.platform != 'win32': # pragma: no cover + raise ImportError('win32 only') + +import _winapi +import itertools +# XXX RustPython TODO: msvcrt +# import msvcrt +import os +import socket +import subprocess +import tempfile +import warnings + + +__all__ = ['socketpair', 'pipe', 'Popen', 'PIPE', 'PipeHandle'] + + +# Constants/globals + + +BUFSIZE = 8192 +PIPE = subprocess.PIPE +STDOUT = subprocess.STDOUT +_mmap_counter = itertools.count() + + +if hasattr(socket, 'socketpair'): + # Since Python 3.5, socket.socketpair() is now also available on Windows + socketpair = socket.socketpair +else: + # Replacement for socket.socketpair() + def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0): + """A socket pair usable as a self-pipe, for Windows. + + Origin: https://gist.github.com/4325783, by Geert Jansen. + Public domain. + """ + if family == socket.AF_INET: + host = '127.0.0.1' + elif family == socket.AF_INET6: + host = '::1' + else: + raise ValueError("Only AF_INET and AF_INET6 socket address " + "families are supported") + if type != socket.SOCK_STREAM: + raise ValueError("Only SOCK_STREAM socket type is supported") + if proto != 0: + raise ValueError("Only protocol zero is supported") + + # We create a connected TCP socket. Note the trick with setblocking(0) + # that prevents us from having to create a thread. 
+        lsock = socket.socket(family, type, proto)
+        try:
+            lsock.bind((host, 0))
+            lsock.listen(1)
+            # On IPv6, ignore flow_info and scope_id
+            addr, port = lsock.getsockname()[:2]
+            csock = socket.socket(family, type, proto)
+            try:
+                csock.setblocking(False)
+                try:
+                    csock.connect((addr, port))
+                except (BlockingIOError, InterruptedError):
+                    pass
+                csock.setblocking(True)
+                ssock, _ = lsock.accept()
+            except:
+                csock.close()
+                raise
+        finally:
+            lsock.close()
+        return (ssock, csock)
+
+
+# Replacement for os.pipe() using handles instead of fds
+
+
+def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
+    """Like os.pipe() but with overlapped support and using handles not fds."""
+    address = tempfile.mktemp(prefix=r'\\.\pipe\python-pipe-%d-%d-' %
+                              (os.getpid(), next(_mmap_counter)))
+
+    if duplex:
+        openmode = _winapi.PIPE_ACCESS_DUPLEX
+        access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
+        obsize, ibsize = bufsize, bufsize
+    else:
+        openmode = _winapi.PIPE_ACCESS_INBOUND
+        access = _winapi.GENERIC_WRITE
+        obsize, ibsize = 0, bufsize
+
+    openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
+
+    if overlapped[0]:
+        openmode |= _winapi.FILE_FLAG_OVERLAPPED
+
+    if overlapped[1]:
+        flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED
+    else:
+        flags_and_attribs = 0
+
+    h1 = h2 = None
+    try:
+        h1 = _winapi.CreateNamedPipe(
+            address, openmode, _winapi.PIPE_WAIT,
+            1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
+
+        h2 = _winapi.CreateFile(
+            address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
+            flags_and_attribs, _winapi.NULL)
+
+        ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
+        ov.GetOverlappedResult(True)
+        return h1, h2
+    except:
+        if h1 is not None:
+            _winapi.CloseHandle(h1)
+        if h2 is not None:
+            _winapi.CloseHandle(h2)
+        raise
+
+
+# Wrapper for a pipe handle
+
+
+class PipeHandle:
+    """Wrapper for an overlapped pipe handle which is vaguely file-object like.
+
+    The IOCP event loop can use these instead of socket objects.
+    """
+    def __init__(self, handle):
+        self._handle = handle
+
+    def __repr__(self):
+        if self._handle is not None:
+            handle = 'handle=%r' % self._handle
+        else:
+            handle = 'closed'
+        return '<%s %s>' % (self.__class__.__name__, handle)
+
+    @property
+    def handle(self):
+        return self._handle
+
+    def fileno(self):
+        if self._handle is None:
+            raise ValueError("I/O operation on closed pipe")
+        return self._handle
+
+    def close(self, *, CloseHandle=_winapi.CloseHandle):
+        if self._handle is not None:
+            CloseHandle(self._handle)
+            self._handle = None
+
+    def __del__(self):
+        if self._handle is not None:
+            warnings.warn("unclosed %r" % self, ResourceWarning,
+                          source=self)
+            self.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, t, v, tb):
+        self.close()
+
+
+# Replacement for subprocess.Popen using overlapped pipe handles
+
+
+class Popen(subprocess.Popen):
+    """Replacement for subprocess.Popen using overlapped pipe handles.
+
+    The stdin, stdout, stderr are None or instances of PipeHandle.
+ """ + def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds): + assert not kwds.get('universal_newlines') + assert kwds.get('bufsize', 0) == 0 + stdin_rfd = stdout_wfd = stderr_wfd = None + stdin_wh = stdout_rh = stderr_rh = None + if stdin == PIPE: + stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True) + stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY) + else: + stdin_rfd = stdin + if stdout == PIPE: + stdout_rh, stdout_wh = pipe(overlapped=(True, False)) + stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0) + else: + stdout_wfd = stdout + if stderr == PIPE: + stderr_rh, stderr_wh = pipe(overlapped=(True, False)) + stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0) + elif stderr == STDOUT: + stderr_wfd = stdout_wfd + else: + stderr_wfd = stderr + try: + super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd, + stderr=stderr_wfd, **kwds) + except: + for h in (stdin_wh, stdout_rh, stderr_rh): + if h is not None: + _winapi.CloseHandle(h) + raise + else: + if stdin_wh is not None: + self.stdin = PipeHandle(stdin_wh) + if stdout_rh is not None: + self.stdout = PipeHandle(stdout_rh) + if stderr_rh is not None: + self.stderr = PipeHandle(stderr_rh) + finally: + if stdin == PIPE: + os.close(stdin_rfd) + if stdout == PIPE: + os.close(stdout_wfd) + if stderr == PIPE: + os.close(stderr_wfd) diff --git a/Lib/atexit.py b/Lib/atexit.py new file mode 100644 index 0000000000..c990e82ba2 --- /dev/null +++ b/Lib/atexit.py @@ -0,0 +1,9 @@ +# Dummy implementation of atexit + + +def register(func, *args, **kwargs): + return func + + +def unregister(func): + pass diff --git a/Lib/calendar.py b/Lib/calendar.py new file mode 100644 index 0000000000..07594f3a83 --- /dev/null +++ b/Lib/calendar.py @@ -0,0 +1,713 @@ +"""Calendar printing functions + +Note when comparing these calendars to the ones printed by cal(1): By +default, these calendars have Monday as the first day of the week, and +Sunday as the last (the European convention). Use setfirstweekday() to +set the first day of the week (0=Monday, 6=Sunday).""" + +import sys +import datetime +import locale as _locale +from itertools import repeat + +__all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday", + "firstweekday", "isleap", "leapdays", "weekday", "monthrange", + "monthcalendar", "prmonth", "month", "prcal", "calendar", + "timegm", "month_name", "month_abbr", "day_name", "day_abbr", + "Calendar", "TextCalendar", "HTMLCalendar", "LocaleTextCalendar", + "LocaleHTMLCalendar", "weekheader"] + +# Exception raised for bad input (with string parameter for details) +error = ValueError + +# Exceptions raised for bad input +class IllegalMonthError(ValueError): + def __init__(self, month): + self.month = month + def __str__(self): + return "bad month number %r; must be 1-12" % self.month + + +class IllegalWeekdayError(ValueError): + def __init__(self, weekday): + self.weekday = weekday + def __str__(self): + return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday + + +# Constants for months referenced later +January = 1 +February = 2 + +# Number of days per month (except for February in leap years) +mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + +# This module used to have hard-coded lists of day and month names, as +# English strings. The classes following emulate a read-only version of +# that, but supply localized names. Note that the values are computed +# fresh on each call, in case the user changes locale between calls. 
+ +class _localized_month: + + _months = [datetime.date(2001, i+1, 1).strftime for i in range(12)] + _months.insert(0, lambda x: "") + + def __init__(self, format): + self.format = format + + def __getitem__(self, i): + funcs = self._months[i] + if isinstance(i, slice): + return [f(self.format) for f in funcs] + else: + return funcs(self.format) + + def __len__(self): + return 13 + + +class _localized_day: + + # January 1, 2001, was a Monday. + _days = [datetime.date(2001, 1, i+1).strftime for i in range(7)] + + def __init__(self, format): + self.format = format + + def __getitem__(self, i): + funcs = self._days[i] + if isinstance(i, slice): + return [f(self.format) for f in funcs] + else: + return funcs(self.format) + + def __len__(self): + return 7 + + +# Full and abbreviated names of weekdays +day_name = _localized_day('%A') +day_abbr = _localized_day('%a') + +# Full and abbreviated names of months (1-based arrays!!!) +month_name = _localized_month('%B') +month_abbr = _localized_month('%b') + +# Constants for weekdays +(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7) + + +def isleap(year): + """Return True for leap years, False for non-leap years.""" + return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0) + + +def leapdays(y1, y2): + """Return number of leap years in range [y1, y2). + Assume y1 <= y2.""" + y1 -= 1 + y2 -= 1 + return (y2//4 - y1//4) - (y2//100 - y1//100) + (y2//400 - y1//400) + + +def weekday(year, month, day): + """Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12), + day (1-31).""" + return datetime.date(year, month, day).weekday() + + +def monthrange(year, month): + """Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for + year, month.""" + if not 1 <= month <= 12: + raise IllegalMonthError(month) + day1 = weekday(year, month, 1) + ndays = mdays[month] + (month == February and isleap(year)) + return day1, ndays + + +class Calendar(object): + """ + Base calendar class. This class doesn't do any formatting. It simply + provides data to subclasses. + """ + + def __init__(self, firstweekday=0): + self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday + + def getfirstweekday(self): + return self._firstweekday % 7 + + def setfirstweekday(self, firstweekday): + self._firstweekday = firstweekday + + firstweekday = property(getfirstweekday, setfirstweekday) + + def iterweekdays(self): + """ + Return an iterator for one week of weekday numbers starting with the + configured first one. + """ + for i in range(self.firstweekday, self.firstweekday + 7): + yield i%7 + + def itermonthdates(self, year, month): + """ + Return an iterator for one month. The iterator will yield datetime.date + values and will always iterate through complete weeks, so it will yield + dates outside the specified month. + """ + date = datetime.date(year, month, 1) + # Go back to the beginning of the week + days = (date.weekday() - self.firstweekday) % 7 + date -= datetime.timedelta(days=days) + oneday = datetime.timedelta(days=1) + while True: + yield date + try: + date += oneday + except OverflowError: + # Adding one day could fail after datetime.MAXYEAR + break + if date.month != month and date.weekday() == self.firstweekday: + break + + def itermonthdays2(self, year, month): + """ + Like itermonthdates(), but will yield (day number, weekday number) + tuples. For days outside the specified month the day number is 0. 
+ """ + for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): + yield d, i % 7 + + def itermonthdays(self, year, month): + """ + Like itermonthdates(), but will yield day numbers. For days outside + the specified month the day number is 0. + """ + day1, ndays = monthrange(year, month) + days_before = (day1 - self.firstweekday) % 7 + yield from repeat(0, days_before) + yield from range(1, ndays + 1) + days_after = (self.firstweekday - day1 - ndays) % 7 + yield from repeat(0, days_after) + + def monthdatescalendar(self, year, month): + """ + Return a matrix (list of lists) representing a month's calendar. + Each row represents a week; week entries are datetime.date values. + """ + dates = list(self.itermonthdates(year, month)) + return [ dates[i:i+7] for i in range(0, len(dates), 7) ] + + def monthdays2calendar(self, year, month): + """ + Return a matrix representing a month's calendar. + Each row represents a week; week entries are + (day number, weekday number) tuples. Day numbers outside this month + are zero. + """ + days = list(self.itermonthdays2(year, month)) + return [ days[i:i+7] for i in range(0, len(days), 7) ] + + def monthdayscalendar(self, year, month): + """ + Return a matrix representing a month's calendar. + Each row represents a week; days outside this month are zero. + """ + days = list(self.itermonthdays(year, month)) + return [ days[i:i+7] for i in range(0, len(days), 7) ] + + def yeardatescalendar(self, year, width=3): + """ + Return the data for the specified year ready for formatting. The return + value is a list of month rows. Each month row contains up to width months. + Each month contains between 4 and 6 weeks and each week contains 1-7 + days. Days are datetime.date objects. + """ + months = [ + self.monthdatescalendar(year, i) + for i in range(January, January+12) + ] + return [months[i:i+width] for i in range(0, len(months), width) ] + + def yeardays2calendar(self, year, width=3): + """ + Return the data for the specified year ready for formatting (similar to + yeardatescalendar()). Entries in the week lists are + (day number, weekday number) tuples. Day numbers outside this month are + zero. + """ + months = [ + self.monthdays2calendar(year, i) + for i in range(January, January+12) + ] + return [months[i:i+width] for i in range(0, len(months), width) ] + + def yeardayscalendar(self, year, width=3): + """ + Return the data for the specified year ready for formatting (similar to + yeardatescalendar()). Entries in the week lists are day numbers. + Day numbers outside this month are zero. + """ + months = [ + self.monthdayscalendar(year, i) + for i in range(January, January+12) + ] + return [months[i:i+width] for i in range(0, len(months), width) ] + + +class TextCalendar(Calendar): + """ + Subclass of Calendar that outputs a calendar as a simple plain text + similar to the UNIX program cal. + """ + + def prweek(self, theweek, width): + """ + Print a single week (no newline). + """ + print(self.formatweek(theweek, width), end=' ') + + def formatday(self, day, weekday, width): + """ + Returns a formatted day. + """ + if day == 0: + s = '' + else: + s = '%2i' % day # right-align single-digit days + return s.center(width) + + def formatweek(self, theweek, width): + """ + Returns a single week in a string (no newline). + """ + return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek) + + def formatweekday(self, day, width): + """ + Returns a formatted week day name. 
+ """ + if width >= 9: + names = day_name + else: + names = day_abbr + return names[day][:width].center(width) + + def formatweekheader(self, width): + """ + Return a header for a week. + """ + return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays()) + + def formatmonthname(self, theyear, themonth, width, withyear=True): + """ + Return a formatted month name. + """ + s = month_name[themonth] + if withyear: + s = "%s %r" % (s, theyear) + return s.center(width) + + def prmonth(self, theyear, themonth, w=0, l=0): + """ + Print a month's calendar. + """ + print(self.formatmonth(theyear, themonth, w, l), end='') + + def formatmonth(self, theyear, themonth, w=0, l=0): + """ + Return a month's calendar string (multi-line). + """ + w = max(2, w) + l = max(1, l) + s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1) + s = s.rstrip() + s += '\n' * l + s += self.formatweekheader(w).rstrip() + s += '\n' * l + for week in self.monthdays2calendar(theyear, themonth): + s += self.formatweek(week, w).rstrip() + s += '\n' * l + return s + + def formatyear(self, theyear, w=2, l=1, c=6, m=3): + """ + Returns a year's calendar as a multi-line string. + """ + w = max(2, w) + l = max(1, l) + c = max(2, c) + colwidth = (w + 1) * 7 - 1 + v = [] + a = v.append + a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip()) + a('\n'*l) + header = self.formatweekheader(w) + for (i, row) in enumerate(self.yeardays2calendar(theyear, m)): + # months in this row + months = range(m*i+1, min(m*(i+1)+1, 13)) + a('\n'*l) + names = (self.formatmonthname(theyear, k, colwidth, False) + for k in months) + a(formatstring(names, colwidth, c).rstrip()) + a('\n'*l) + headers = (header for k in months) + a(formatstring(headers, colwidth, c).rstrip()) + a('\n'*l) + # max number of weeks for this row + height = max(len(cal) for cal in row) + for j in range(height): + weeks = [] + for cal in row: + if j >= len(cal): + weeks.append('') + else: + weeks.append(self.formatweek(cal[j], w)) + a(formatstring(weeks, colwidth, c).rstrip()) + a('\n' * l) + return ''.join(v) + + def pryear(self, theyear, w=0, l=0, c=6, m=3): + """Print a year's calendar.""" + print(self.formatyear(theyear, w, l, c, m)) + + +class HTMLCalendar(Calendar): + """ + This calendar returns complete HTML pages. + """ + + # CSS classes for the day s + cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"] + + def formatday(self, day, weekday): + """ + Return a day as a table cell. + """ + if day == 0: + return ' ' # day outside month + else: + return '%d' % (self.cssclasses[weekday], day) + + def formatweek(self, theweek): + """ + Return a complete week as a table row. + """ + s = ''.join(self.formatday(d, wd) for (d, wd) in theweek) + return '%s' % s + + def formatweekday(self, day): + """ + Return a weekday name as a table header. + """ + return '%s' % (self.cssclasses[day], day_abbr[day]) + + def formatweekheader(self): + """ + Return a header for a week as a table row. + """ + s = ''.join(self.formatweekday(i) for i in self.iterweekdays()) + return '%s' % s + + def formatmonthname(self, theyear, themonth, withyear=True): + """ + Return a month name as a table row. + """ + if withyear: + s = '%s %s' % (month_name[themonth], theyear) + else: + s = '%s' % month_name[themonth] + return '%s' % s + + def formatmonth(self, theyear, themonth, withyear=True): + """ + Return a formatted month as a table. 
+ """ + v = [] + a = v.append + a('') + a('\n') + a(self.formatmonthname(theyear, themonth, withyear=withyear)) + a('\n') + a(self.formatweekheader()) + a('\n') + for week in self.monthdays2calendar(theyear, themonth): + a(self.formatweek(week)) + a('\n') + a('
') + a('\n') + return ''.join(v) + + def formatyear(self, theyear, width=3): + """ + Return a formatted year as a table of tables. + """ + v = [] + a = v.append + width = max(width, 1) + a('') + a('\n') + a('' % (width, theyear)) + for i in range(January, January+12, width): + # months in this row + months = range(i, min(i+width, 13)) + a('') + for m in months: + a('') + a('') + a('
%s
') + a(self.formatmonth(theyear, m, withyear=False)) + a('
') + return ''.join(v) + + def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None): + """ + Return a formatted year as a complete HTML page. + """ + if encoding is None: + encoding = sys.getdefaultencoding() + v = [] + a = v.append + a('\n' % encoding) + a('\n') + a('\n') + a('\n') + a('\n' % encoding) + if css is not None: + a('\n' % css) + a('Calendar for %d\n' % theyear) + a('\n') + a('\n') + a(self.formatyear(theyear, width)) + a('\n') + a('\n') + return ''.join(v).encode(encoding, "xmlcharrefreplace") + + +class different_locale: + def __init__(self, locale): + self.locale = locale + + def __enter__(self): + self.oldlocale = _locale.getlocale(_locale.LC_TIME) + _locale.setlocale(_locale.LC_TIME, self.locale) + + def __exit__(self, *args): + _locale.setlocale(_locale.LC_TIME, self.oldlocale) + + +class LocaleTextCalendar(TextCalendar): + """ + This class can be passed a locale name in the constructor and will return + month and weekday names in the specified locale. If this locale includes + an encoding all strings containing month and weekday names will be returned + as unicode. + """ + + def __init__(self, firstweekday=0, locale=None): + TextCalendar.__init__(self, firstweekday) + if locale is None: + locale = _locale.getdefaultlocale() + self.locale = locale + + def formatweekday(self, day, width): + with different_locale(self.locale): + if width >= 9: + names = day_name + else: + names = day_abbr + name = names[day] + return name[:width].center(width) + + def formatmonthname(self, theyear, themonth, width, withyear=True): + with different_locale(self.locale): + s = month_name[themonth] + if withyear: + s = "%s %r" % (s, theyear) + return s.center(width) + + +class LocaleHTMLCalendar(HTMLCalendar): + """ + This class can be passed a locale name in the constructor and will return + month and weekday names in the specified locale. If this locale includes + an encoding all strings containing month and weekday names will be returned + as unicode. 
+ """ + def __init__(self, firstweekday=0, locale=None): + HTMLCalendar.__init__(self, firstweekday) + if locale is None: + locale = _locale.getdefaultlocale() + self.locale = locale + + def formatweekday(self, day): + with different_locale(self.locale): + s = day_abbr[day] + return '%s' % (self.cssclasses[day], s) + + def formatmonthname(self, theyear, themonth, withyear=True): + with different_locale(self.locale): + s = month_name[themonth] + if withyear: + s = '%s %s' % (s, theyear) + return '%s' % s + + +# Support for old module level interface +c = TextCalendar() + +firstweekday = c.getfirstweekday + +def setfirstweekday(firstweekday): + if not MONDAY <= firstweekday <= SUNDAY: + raise IllegalWeekdayError(firstweekday) + c.firstweekday = firstweekday + +monthcalendar = c.monthdayscalendar +prweek = c.prweek +week = c.formatweek +weekheader = c.formatweekheader +prmonth = c.prmonth +month = c.formatmonth +calendar = c.formatyear +prcal = c.pryear + + +# Spacing of month columns for multi-column year calendar +_colwidth = 7*3 - 1 # Amount printed by prweek() +_spacing = 6 # Number of spaces between columns + + +def format(cols, colwidth=_colwidth, spacing=_spacing): + """Prints multi-column formatting for year calendars""" + print(formatstring(cols, colwidth, spacing)) + + +def formatstring(cols, colwidth=_colwidth, spacing=_spacing): + """Returns a string formatted from n strings, centered within n columns.""" + spacing *= ' ' + return spacing.join(c.center(colwidth) for c in cols) + + +EPOCH = 1970 +_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal() + + +def timegm(tuple): + """Unrelated but handy function to calculate Unix timestamp from GMT.""" + year, month, day, hour, minute, second = tuple[:6] + days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1 + hours = days*24 + hour + minutes = hours*60 + minute + seconds = minutes*60 + second + return seconds + + +def main(args): + import argparse + parser = argparse.ArgumentParser() + textgroup = parser.add_argument_group('text only arguments') + htmlgroup = parser.add_argument_group('html only arguments') + textgroup.add_argument( + "-w", "--width", + type=int, default=2, + help="width of date column (default 2)" + ) + textgroup.add_argument( + "-l", "--lines", + type=int, default=1, + help="number of lines for each week (default 1)" + ) + textgroup.add_argument( + "-s", "--spacing", + type=int, default=6, + help="spacing between months (default 6)" + ) + textgroup.add_argument( + "-m", "--months", + type=int, default=3, + help="months per row (default 3)" + ) + htmlgroup.add_argument( + "-c", "--css", + default="calendar.css", + help="CSS to use for page" + ) + parser.add_argument( + "-L", "--locale", + default=None, + help="locale to be used from month and weekday names" + ) + parser.add_argument( + "-e", "--encoding", + default=None, + help="encoding to use for output" + ) + parser.add_argument( + "-t", "--type", + default="text", + choices=("text", "html"), + help="output type (text or html)" + ) + parser.add_argument( + "year", + nargs='?', type=int, + help="year number (1-9999)" + ) + parser.add_argument( + "month", + nargs='?', type=int, + help="month number (1-12, text only)" + ) + + options = parser.parse_args(args[1:]) + + if options.locale and not options.encoding: + parser.error("if --locale is specified --encoding is required") + sys.exit(1) + + locale = options.locale, options.encoding + + if options.type == "html": + if options.locale: + cal = LocaleHTMLCalendar(locale=locale) + else: + cal = 
+        encoding = options.encoding
+        if encoding is None:
+            encoding = sys.getdefaultencoding()
+        optdict = dict(encoding=encoding, css=options.css)
+        write = sys.stdout.buffer.write
+        if options.year is None:
+            write(cal.formatyearpage(datetime.date.today().year, **optdict))
+        elif options.month is None:
+            write(cal.formatyearpage(options.year, **optdict))
+        else:
+            parser.error("incorrect number of arguments")
+            sys.exit(1)
+    else:
+        if options.locale:
+            cal = LocaleTextCalendar(locale=locale)
+        else:
+            cal = TextCalendar()
+        optdict = dict(w=options.width, l=options.lines)
+        if options.month is None:
+            optdict["c"] = options.spacing
+            optdict["m"] = options.months
+        if options.year is None:
+            result = cal.formatyear(datetime.date.today().year, **optdict)
+        elif options.month is None:
+            result = cal.formatyear(options.year, **optdict)
+        else:
+            result = cal.formatmonth(options.year, options.month, **optdict)
+        write = sys.stdout.write
+        if options.encoding:
+            result = result.encode(options.encoding)
+            write = sys.stdout.buffer.write
+        write(result)
+
+
+if __name__ == "__main__":
+    main(sys.argv)
diff --git a/Lib/code.py b/Lib/code.py
new file mode 100644
index 0000000000..23295f4cf5
--- /dev/null
+++ b/Lib/code.py
@@ -0,0 +1,314 @@
+"""Utilities needed to emulate Python's interactive interpreter.
+
+"""
+
+# Inspired by similar code by Jeff Epler and Fredrik Lundh.
+
+
+import sys
+import traceback
+import argparse
+from codeop import CommandCompiler, compile_command
+
+__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact",
+           "compile_command"]
+
+class InteractiveInterpreter:
+    """Base class for InteractiveConsole.
+
+    This class deals with parsing and interpreter state (the user's
+    namespace); it doesn't deal with input buffering or prompting or
+    input file naming (the filename is always passed in explicitly).
+
+    """
+
+    def __init__(self, locals=None):
+        """Constructor.
+
+        The optional 'locals' argument specifies the dictionary in
+        which code will be executed; it defaults to a newly created
+        dictionary with key "__name__" set to "__console__" and key
+        "__doc__" set to None.
+
+        """
+        if locals is None:
+            locals = {"__name__": "__console__", "__doc__": None}
+        self.locals = locals
+        self.compile = CommandCompiler()
+
+    def runsource(self, source, filename="<input>", symbol="single"):
+        """Compile and run some source in the interpreter.
+
+        Arguments are as for compile_command().
+
+        One of several things can happen:
+
+        1) The input is incorrect; compile_command() raised an
+        exception (SyntaxError or OverflowError). A syntax traceback
+        will be printed by calling the showsyntaxerror() method.
+
+        2) The input is incomplete, and more input is required;
+        compile_command() returned None. Nothing happens.
+
+        3) The input is complete; compile_command() returned a code
+        object. The code is executed by calling self.runcode() (which
+        also handles run-time exceptions, except for SystemExit).
+
+        The return value is True in case 2, False in the other cases (unless
+        an exception is raised). The return value can be used to
+        decide whether to use sys.ps1 or sys.ps2 to prompt the next
+        line.
+
+        """
+        try:
+            code = self.compile(source, filename, symbol)
+        except (OverflowError, SyntaxError, ValueError):
+            # Case 1
+            self.showsyntaxerror(filename)
+            return False
+
+        if code is None:
+            # Case 2
+            return True
+
+        # Case 3
+        self.runcode(code)
+        return False
+
+    def runcode(self, code):
+        """Execute a code object.
+
+        When an exception occurs, self.showtraceback() is called to
+        display a traceback. All exceptions are caught except
+        SystemExit, which is reraised.
+
+        A note about KeyboardInterrupt: this exception may occur
+        elsewhere in this code, and may not always be caught. The
+        caller should be prepared to deal with it.
+
+        """
+        try:
+            exec(code, self.locals)
+        except SystemExit:
+            raise
+        except:
+            self.showtraceback()
+
+    def showsyntaxerror(self, filename=None):
+        """Display the syntax error that just occurred.
+
+        This doesn't display a stack trace because there isn't one.
+
+        If a filename is given, it is stuffed in the exception instead
+        of what was there before (because Python's parser always uses
+        "<string>" when reading from a string).
+
+        The output is written by self.write(), below.
+
+        """
+        type, value, tb = sys.exc_info()
+        sys.last_type = type
+        sys.last_value = value
+        sys.last_traceback = tb
+        if filename and type is SyntaxError:
+            # Work hard to stuff the correct filename in the exception
+            try:
+                msg, (dummy_filename, lineno, offset, line) = value.args
+            except ValueError:
+                # Not the format we expect; leave it alone
+                pass
+            else:
+                # Stuff in the right filename
+                value = SyntaxError(msg, (filename, lineno, offset, line))
+                sys.last_value = value
+        if sys.excepthook is sys.__excepthook__:
+            lines = traceback.format_exception_only(type, value)
+            self.write(''.join(lines))
+        else:
+            # If someone has set sys.excepthook, we let that take precedence
+            # over self.write
+            sys.excepthook(type, value, tb)
+
+    def showtraceback(self):
+        """Display the exception that just occurred.
+
+        We remove the first stack item because it is our own code.
+
+        The output is written by self.write(), below.
+
+        """
+        sys.last_type, sys.last_value, last_tb = ei = sys.exc_info()
+        sys.last_traceback = last_tb
+        try:
+            lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next)
+            if sys.excepthook is sys.__excepthook__:
+                self.write(''.join(lines))
+            else:
+                # If someone has set sys.excepthook, we let that take precedence
+                # over self.write
+                sys.excepthook(ei[0], ei[1], last_tb)
+        finally:
+            last_tb = ei = None
+
+    def write(self, data):
+        """Write a string.
+
+        The base implementation writes to sys.stderr; a subclass may
+        replace this with a different implementation.
+
+        """
+        sys.stderr.write(data)
+
+
+class InteractiveConsole(InteractiveInterpreter):
+    """Closely emulate the behavior of the interactive Python interpreter.
+
+    This class builds on InteractiveInterpreter and adds prompting
+    using the familiar sys.ps1 and sys.ps2, and input buffering.
+
+    """
+
+    def __init__(self, locals=None, filename="<console>"):
+        """Constructor.
+
+        The optional locals argument will be passed to the
+        InteractiveInterpreter base class.
+
+        The optional filename argument should specify the (file)name
+        of the input stream; it will show up in tracebacks.
+
+        """
+        InteractiveInterpreter.__init__(self, locals)
+        self.filename = filename
+        self.resetbuffer()
+
+    def resetbuffer(self):
+        """Reset the input buffer."""
+        self.buffer = []
+
+    def interact(self, banner=None, exitmsg=None):
+        """Closely emulate the interactive Python console.
+
+        The optional banner argument specifies the banner to print
+        before the first interaction; by default it prints a banner
+        similar to the one printed by the real Python interpreter,
+        followed by the current class name in parentheses (so as not
+        to confuse this with the real interpreter -- since it's so
+        close!).
+ + The optional exitmsg argument specifies the exit message + printed when exiting. Pass the empty string to suppress + printing an exit message. If exitmsg is not given or None, + a default message is printed. + + """ + try: + sys.ps1 + except AttributeError: + sys.ps1 = ">>> " + try: + sys.ps2 + except AttributeError: + sys.ps2 = "... " + cprt = 'Type "help", "copyright", "credits" or "license" for more information.' + if banner is None: + self.write("Python %s on %s\n%s\n(%s)\n" % + (sys.version, sys.platform, cprt, + self.__class__.__name__)) + elif banner: + self.write("%s\n" % str(banner)) + more = 0 + while 1: + try: + if more: + prompt = sys.ps2 + else: + prompt = sys.ps1 + try: + line = self.raw_input(prompt) + except EOFError: + self.write("\n") + break + else: + more = self.push(line) + except KeyboardInterrupt: + self.write("\nKeyboardInterrupt\n") + self.resetbuffer() + more = 0 + if exitmsg is None: + self.write('now exiting %s...\n' % self.__class__.__name__) + elif exitmsg != '': + self.write('%s\n' % exitmsg) + + def push(self, line): + """Push a line to the interpreter. + + The line should not have a trailing newline; it may have + internal newlines. The line is appended to a buffer and the + interpreter's runsource() method is called with the + concatenated contents of the buffer as source. If this + indicates that the command was executed or invalid, the buffer + is reset; otherwise, the command is incomplete, and the buffer + is left as it was after the line was appended. The return + value is 1 if more input is required, 0 if the line was dealt + with in some way (this is the same as runsource()). + + """ + self.buffer.append(line) + source = "\n".join(self.buffer) + more = self.runsource(source, self.filename) + if not more: + self.resetbuffer() + return more + + def raw_input(self, prompt=""): + """Write a prompt and read a line. + + The returned line does not include the trailing newline. + When the user enters the EOF key sequence, EOFError is raised. + + The base implementation uses the built-in function + input(); a subclass may replace this with a different + implementation. + + """ + return input(prompt) + + + +def interact(banner=None, readfunc=None, local=None, exitmsg=None): + """Closely emulate the interactive Python interpreter. + + This is a backwards compatible interface to the InteractiveConsole + class. When readfunc is not specified, it attempts to import the + readline module to enable GNU readline if it is available. 
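A minimal sketch of how the InteractiveConsole defined above is typically embedded; it is illustrative only, and the `app_state` dictionary is a hypothetical stand-in for whatever objects a host application wants to expose:

    import code

    # Hypothetical application objects to expose inside the embedded REPL.
    app_state = {"version": "0.1", "debug": True}

    # Seed the console namespace; "__console__" mirrors what the module uses.
    namespace = {"__name__": "__console__", "app_state": app_state}
    console = code.InteractiveConsole(locals=namespace)

    # Blocks until the user sends EOF (Ctrl-D, or Ctrl-Z on Windows).
    console.interact(banner="debug console", exitmsg="leaving console")
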
+
+    Arguments (all optional, all default to None):
+
+    banner -- passed to InteractiveConsole.interact()
+    readfunc -- if not None, replaces InteractiveConsole.raw_input()
+    local -- passed to InteractiveInterpreter.__init__()
+    exitmsg -- passed to InteractiveConsole.interact()
+
+    """
+    console = InteractiveConsole(local)
+    if readfunc is not None:
+        console.raw_input = readfunc
+    else:
+        try:
+            import readline
+        except ImportError:
+            pass
+    console.interact(banner, exitmsg)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-q', action='store_true',
+                        help="don't print version and copyright messages")
+    args = parser.parse_args()
+    if args.q or sys.flags.quiet:
+        banner = ''
+    else:
+        banner = None
+    interact(banner)
diff --git a/Lib/collections/__init__.py b/Lib/collections/__init__.py
index 4b6cf87be6..6260b96010 100644
--- a/Lib/collections/__init__.py
+++ b/Lib/collections/__init__.py
@@ -17,7 +17,12 @@
 __all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
            'UserString', 'Counter', 'OrderedDict', 'ChainMap']
 
+# For backwards compatibility, continue to make the collections ABCs
+# available through the collections module.
+from _collections_abc import *
 import _collections_abc
+__all__ += _collections_abc.__all__
+
 from operator import itemgetter as _itemgetter, eq as _eq
 from keyword import iskeyword as _iskeyword
 import sys as _sys
diff --git a/Lib/colorsys.py b/Lib/colorsys.py
index 706c8dd8a6..12b432537b 100644
--- a/Lib/colorsys.py
+++ b/Lib/colorsys.py
@@ -1,6 +1,37 @@
+"""Conversion functions between RGB and other color systems.
+This module provides two functions for each color system ABC:
+    rgb_to_abc(r, g, b) --> a, b, c
+    abc_to_rgb(a, b, c) --> r, g, b
+All inputs and outputs are triples of floats in the range [0.0...1.0]
+(with the exception of I and Q, which cover a slightly larger range).
+Inputs outside the valid range may cause exceptions or invalid outputs.
+Supported color systems:
+RGB: Red, Green, Blue components
+YIQ: Luminance, Chrominance (used by composite video signals)
+HLS: Hue, Luminance, Saturation
+HSV: Hue, Saturation, Value
+"""
 
-# TODO: implement standard library here
+# References:
+# http://en.wikipedia.org/wiki/YIQ
+# http://en.wikipedia.org/wiki/HLS_color_space
+# http://en.wikipedia.org/wiki/HSV_color_space
 
+__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
+           "rgb_to_hsv","hsv_to_rgb"]
+
+# Some floating point constants
+
+ONE_THIRD = 1.0/3.0
+ONE_SIXTH = 1.0/6.0
+TWO_THIRD = 2.0/3.0
+
+# YIQ: used by composite video signals (linear combinations of RGB)
+# Y: perceived grey level (0.0 == black, 1.0 == white)
+# I, Q: color components
+#
+# There are a great many versions of the constants used in these formulae.
+# The ones in this library use constants from the FCC version of NTSC.
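A small sketch exercising the converters defined below; all channels are floats in [0.0, 1.0], and the sample color is arbitrary:

    import colorsys

    # All channels are floats in [0.0, 1.0].
    r, g, b = 0.2, 0.4, 0.4

    # RGB -> HSV -> RGB round-trips losslessly up to floating-point error.
    h, s, v = colorsys.rgb_to_hsv(r, g, b)
    r2, g2, b2 = colorsys.hsv_to_rgb(h, s, v)
    assert all(abs(x - y) < 1e-9 for x, y in zip((r, g, b), (r2, g2, b2)))

    # RGB -> YIQ -> RGB can clip at the gamut edges, since yiq_to_rgb
    # clamps each channel back into [0.0, 1.0].
    y, i, q = colorsys.rgb_to_yiq(r, g, b)
    print(colorsys.yiq_to_rgb(y, i, q))
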
def rgb_to_yiq(r, g, b): y = 0.30*r + 0.59*g + 0.11*b @@ -8,3 +39,122 @@ def rgb_to_yiq(r, g, b): q = 0.48*(r-y) + 0.41*(b-y) return (y, i, q) +def yiq_to_rgb(y, i, q): + # r = y + (0.27*q + 0.41*i) / (0.74*0.41 + 0.27*0.48) + # b = y + (0.74*q - 0.48*i) / (0.74*0.41 + 0.27*0.48) + # g = y - (0.30*(r-y) + 0.11*(b-y)) / 0.59 + + r = y + 0.9468822170900693*i + 0.6235565819861433*q + g = y - 0.27478764629897834*i - 0.6356910791873801*q + b = y - 1.1085450346420322*i + 1.7090069284064666*q + + if r < 0.0: + r = 0.0 + if g < 0.0: + g = 0.0 + if b < 0.0: + b = 0.0 + if r > 1.0: + r = 1.0 + if g > 1.0: + g = 1.0 + if b > 1.0: + b = 1.0 + return (r, g, b) + + +# HLS: Hue, Luminance, Saturation +# H: position in the spectrum +# L: color lightness +# S: color saturation + +def rgb_to_hls(r, g, b): + maxc = max(r, g, b) + minc = min(r, g, b) + # XXX Can optimize (maxc+minc) and (maxc-minc) + l = (minc+maxc)/2.0 + if minc == maxc: + return 0.0, l, 0.0 + if l <= 0.5: + s = (maxc-minc) / (maxc+minc) + else: + s = (maxc-minc) / (2.0-maxc-minc) + rc = (maxc-r) / (maxc-minc) + gc = (maxc-g) / (maxc-minc) + bc = (maxc-b) / (maxc-minc) + if r == maxc: + h = bc-gc + elif g == maxc: + h = 2.0+rc-bc + else: + h = 4.0+gc-rc + h = (h/6.0) % 1.0 + return h, l, s + +def hls_to_rgb(h, l, s): + if s == 0.0: + return l, l, l + if l <= 0.5: + m2 = l * (1.0+s) + else: + m2 = l+s-(l*s) + m1 = 2.0*l - m2 + return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD)) + +def _v(m1, m2, hue): + hue = hue % 1.0 + if hue < ONE_SIXTH: + return m1 + (m2-m1)*hue*6.0 + if hue < 0.5: + return m2 + if hue < TWO_THIRD: + return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0 + return m1 + + +# HSV: Hue, Saturation, Value +# H: position in the spectrum +# S: color saturation ("purity") +# V: color brightness + +def rgb_to_hsv(r, g, b): + maxc = max(r, g, b) + minc = min(r, g, b) + v = maxc + if minc == maxc: + return 0.0, 0.0, v + s = (maxc-minc) / maxc + rc = (maxc-r) / (maxc-minc) + gc = (maxc-g) / (maxc-minc) + bc = (maxc-b) / (maxc-minc) + if r == maxc: + h = bc-gc + elif g == maxc: + h = 2.0+rc-bc + else: + h = 4.0+gc-rc + h = (h/6.0) % 1.0 + return h, s, v + +def hsv_to_rgb(h, s, v): + if s == 0.0: + return v, v, v + i = int(h*6.0) # XXX assume int() truncates! + f = (h*6.0) - i + p = v*(1.0 - s) + q = v*(1.0 - s*f) + t = v*(1.0 - s*(1.0-f)) + i = i%6 + if i == 0: + return v, t, p + if i == 1: + return q, v, p + if i == 2: + return p, v, t + if i == 3: + return p, q, v + if i == 4: + return t, p, v + if i == 5: + return v, p, q + # Cannot get here diff --git a/Lib/compileall.py b/Lib/compileall.py new file mode 100644 index 0000000000..1c9ceb6930 --- /dev/null +++ b/Lib/compileall.py @@ -0,0 +1,295 @@ +"""Module/script to byte-compile all .py files to .pyc files. + +When called as a script with arguments, this compiles the directories +given as arguments recursively; the -l option prevents it from +recursing into directories. + +Without arguments, if compiles all modules on sys.path, without +recursing into subdirectories. (Even though it should do so for +packages -- for now, you'll have to deal with packages separately.) + +See module py_compile for details of the actual byte-compilation. 
+""" +import os +import sys +import importlib.util +import py_compile +import struct + +try: + from concurrent.futures import ProcessPoolExecutor +except ImportError: + ProcessPoolExecutor = None +from functools import partial + +__all__ = ["compile_dir","compile_file","compile_path"] + +def _walk_dir(dir, ddir=None, maxlevels=10, quiet=0): + if quiet < 2 and isinstance(dir, os.PathLike): + dir = os.fspath(dir) + if not quiet: + print('Listing {!r}...'.format(dir)) + try: + names = os.listdir(dir) + except OSError: + if quiet < 2: + print("Can't list {!r}".format(dir)) + names = [] + names.sort() + for name in names: + if name == '__pycache__': + continue + fullname = os.path.join(dir, name) + if ddir is not None: + dfile = os.path.join(ddir, name) + else: + dfile = None + if not os.path.isdir(fullname): + yield fullname + elif (maxlevels > 0 and name != os.curdir and name != os.pardir and + os.path.isdir(fullname) and not os.path.islink(fullname)): + yield from _walk_dir(fullname, ddir=dfile, + maxlevels=maxlevels - 1, quiet=quiet) + +def compile_dir(dir, maxlevels=10, ddir=None, force=False, rx=None, + quiet=0, legacy=False, optimize=-1, workers=1): + """Byte-compile all modules in the given directory tree. + + Arguments (only dir is required): + + dir: the directory to byte-compile + maxlevels: maximum recursion level (default 10) + ddir: the directory that will be prepended to the path to the + file as it is compiled into each byte-code file. + force: if True, force compilation, even if timestamps are up-to-date + quiet: full output with False or 0, errors only with 1, + no output with 2 + legacy: if True, produce legacy pyc paths instead of PEP 3147 paths + optimize: optimization level or -1 for level of the interpreter + workers: maximum number of parallel workers + """ + if workers is not None and workers < 0: + raise ValueError('workers must be greater or equal to 0') + + files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels, + ddir=ddir) + success = True + if workers is not None and workers != 1 and ProcessPoolExecutor is not None: + workers = workers or None + with ProcessPoolExecutor(max_workers=workers) as executor: + results = executor.map(partial(compile_file, + ddir=ddir, force=force, + rx=rx, quiet=quiet, + legacy=legacy, + optimize=optimize), + files) + success = min(results, default=True) + else: + for file in files: + if not compile_file(file, ddir, force, rx, quiet, + legacy, optimize): + success = False + return success + +def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0, + legacy=False, optimize=-1): + """Byte-compile one file. + + Arguments (only fullname is required): + + fullname: the file to byte-compile + ddir: if given, the directory name compiled in to the + byte-code file. 
+ force: if True, force compilation, even if timestamps are up-to-date + quiet: full output with False or 0, errors only with 1, + no output with 2 + legacy: if True, produce legacy pyc paths instead of PEP 3147 paths + optimize: optimization level or -1 for level of the interpreter + """ + success = True + if quiet < 2 and isinstance(fullname, os.PathLike): + fullname = os.fspath(fullname) + name = os.path.basename(fullname) + if ddir is not None: + dfile = os.path.join(ddir, name) + else: + dfile = None + if rx is not None: + mo = rx.search(fullname) + if mo: + return success + if os.path.isfile(fullname): + if legacy: + cfile = fullname + 'c' + else: + if optimize >= 0: + opt = optimize if optimize >= 1 else '' + cfile = importlib.util.cache_from_source( + fullname, optimization=opt) + else: + cfile = importlib.util.cache_from_source(fullname) + cache_dir = os.path.dirname(cfile) + head, tail = name[:-3], name[-3:] + if tail == '.py': + if not force: + try: + mtime = int(os.stat(fullname).st_mtime) + expect = struct.pack('<4sl', importlib.util.MAGIC_NUMBER, + mtime) + with open(cfile, 'rb') as chandle: + actual = chandle.read(8) + if expect == actual: + return success + except OSError: + pass + if not quiet: + print('Compiling {!r}...'.format(fullname)) + try: + ok = py_compile.compile(fullname, cfile, dfile, True, + optimize=optimize) + except py_compile.PyCompileError as err: + success = False + if quiet >= 2: + return success + elif quiet: + print('*** Error compiling {!r}...'.format(fullname)) + else: + print('*** ', end='') + # escape non-printable characters in msg + msg = err.msg.encode(sys.stdout.encoding, + errors='backslashreplace') + msg = msg.decode(sys.stdout.encoding) + print(msg) + except (SyntaxError, UnicodeError, OSError) as e: + success = False + if quiet >= 2: + return success + elif quiet: + print('*** Error compiling {!r}...'.format(fullname)) + else: + print('*** ', end='') + print(e.__class__.__name__ + ':', e) + else: + if ok == 0: + success = False + return success + +def compile_path(skip_curdir=1, maxlevels=0, force=False, quiet=0, + legacy=False, optimize=-1): + """Byte-compile all module on sys.path. + + Arguments (all optional): + + skip_curdir: if true, skip current directory (default True) + maxlevels: max recursion level (default 0) + force: as for compile_dir() (default False) + quiet: as for compile_dir() (default 0) + legacy: as for compile_dir() (default False) + optimize: as for compile_dir() (default -1) + """ + success = True + for dir in sys.path: + if (not dir or dir == os.curdir) and skip_curdir: + if quiet < 2: + print('Skipping current directory') + else: + success = success and compile_dir(dir, maxlevels, None, + force, quiet=quiet, + legacy=legacy, optimize=optimize) + return success + + +def main(): + """Script main program.""" + import argparse + + parser = argparse.ArgumentParser( + description='Utilities to support installing Python libraries.') + parser.add_argument('-l', action='store_const', const=0, + default=10, dest='maxlevels', + help="don't recurse into subdirectories") + parser.add_argument('-r', type=int, dest='recursion', + help=('control the maximum recursion level. 
' + 'if `-l` and `-r` options are specified, ' + 'then `-r` takes precedence.')) + parser.add_argument('-f', action='store_true', dest='force', + help='force rebuild even if timestamps are up to date') + parser.add_argument('-q', action='count', dest='quiet', default=0, + help='output only error messages; -qq will suppress ' + 'the error messages as well.') + parser.add_argument('-b', action='store_true', dest='legacy', + help='use legacy (pre-PEP3147) compiled file locations') + parser.add_argument('-d', metavar='DESTDIR', dest='ddir', default=None, + help=('directory to prepend to file paths for use in ' + 'compile-time tracebacks and in runtime ' + 'tracebacks in cases where the source file is ' + 'unavailable')) + parser.add_argument('-x', metavar='REGEXP', dest='rx', default=None, + help=('skip files matching the regular expression; ' + 'the regexp is searched for in the full path ' + 'of each file considered for compilation')) + parser.add_argument('-i', metavar='FILE', dest='flist', + help=('add all the files and directories listed in ' + 'FILE to the list considered for compilation; ' + 'if "-", names are read from stdin')) + parser.add_argument('compile_dest', metavar='FILE|DIR', nargs='*', + help=('zero or more file and directory names ' + 'to compile; if no arguments given, defaults ' + 'to the equivalent of -l sys.path')) + parser.add_argument('-j', '--workers', default=1, + type=int, help='Run compileall concurrently') + + args = parser.parse_args() + compile_dests = args.compile_dest + + if args.rx: + import re + args.rx = re.compile(args.rx) + + + if args.recursion is not None: + maxlevels = args.recursion + else: + maxlevels = args.maxlevels + + # if flist is provided then load it + if args.flist: + try: + with (sys.stdin if args.flist=='-' else open(args.flist)) as f: + for line in f: + compile_dests.append(line.strip()) + except OSError: + if args.quiet < 2: + print("Error reading file list {}".format(args.flist)) + return False + + if args.workers is not None: + args.workers = args.workers or None + + success = True + try: + if compile_dests: + for dest in compile_dests: + if os.path.isfile(dest): + if not compile_file(dest, args.ddir, args.force, args.rx, + args.quiet, args.legacy): + success = False + else: + if not compile_dir(dest, maxlevels, args.ddir, + args.force, args.rx, args.quiet, + args.legacy, workers=args.workers): + success = False + return success + else: + return compile_path(legacy=args.legacy, force=args.force, + quiet=args.quiet) + except KeyboardInterrupt: + if args.quiet < 2: + print("\n[interrupted]") + return False + return True + + +if __name__ == '__main__': + exit_status = int(not main()) + sys.exit(exit_status) diff --git a/Lib/concurrent/__init__.py b/Lib/concurrent/__init__.py new file mode 100644 index 0000000000..196d378857 --- /dev/null +++ b/Lib/concurrent/__init__.py @@ -0,0 +1 @@ +# This directory is a Python package. diff --git a/Lib/concurrent/futures/__init__.py b/Lib/concurrent/futures/__init__.py new file mode 100644 index 0000000000..b5231f8aab --- /dev/null +++ b/Lib/concurrent/futures/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. 
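A brief sketch of the programmatic entry points compileall exposes, as described above; the "Lib" path and the test-directory regex are illustrative assumptions, not part of the module:

    import compileall
    import re

    # Recursively byte-compile everything under Lib/ with errors-only output;
    # rx is searched against each file's full path, so test dirs are skipped.
    ok = compileall.compile_dir(
        "Lib",
        maxlevels=10,
        rx=re.compile(r"[/\\]test[/\\]"),
        quiet=1,
    )

    # A single file can be forced to rebuild even if its timestamp is current.
    compileall.compile_file("Lib/colorsys.py", force=True)
    print("all compiled" if ok else "some files failed")
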
+ +"""Execute computations asynchronously using threads or processes.""" + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +from concurrent.futures._base import (FIRST_COMPLETED, + FIRST_EXCEPTION, + ALL_COMPLETED, + CancelledError, + TimeoutError, + Future, + Executor, + wait, + as_completed) +from concurrent.futures.process import ProcessPoolExecutor +from concurrent.futures.thread import ThreadPoolExecutor diff --git a/Lib/concurrent/futures/_base.py b/Lib/concurrent/futures/_base.py new file mode 100644 index 0000000000..295489c93e --- /dev/null +++ b/Lib/concurrent/futures/_base.py @@ -0,0 +1,582 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +import collections +import logging +import threading +import time + +FIRST_COMPLETED = 'FIRST_COMPLETED' +FIRST_EXCEPTION = 'FIRST_EXCEPTION' +ALL_COMPLETED = 'ALL_COMPLETED' +_AS_COMPLETED = '_AS_COMPLETED' + +# Possible future states (for internal use by the futures package). +PENDING = 'PENDING' +RUNNING = 'RUNNING' +# The future was cancelled by the user... +CANCELLED = 'CANCELLED' +# ...and _Waiter.add_cancelled() was called by a worker. +CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED' +FINISHED = 'FINISHED' + +_FUTURE_STATES = [ + PENDING, + RUNNING, + CANCELLED, + CANCELLED_AND_NOTIFIED, + FINISHED +] + +_STATE_TO_DESCRIPTION_MAP = { + PENDING: "pending", + RUNNING: "running", + CANCELLED: "cancelled", + CANCELLED_AND_NOTIFIED: "cancelled", + FINISHED: "finished" +} + +# Logger for internal use by the futures package. +LOGGER = logging.getLogger("concurrent.futures") + +class Error(Exception): + """Base class for all future-related exceptions.""" + pass + +class CancelledError(Error): + """The Future was cancelled.""" + pass + +class TimeoutError(Error): + """The operation exceeded the given deadline.""" + pass + +class _Waiter(object): + """Provides the event that wait() and as_completed() block on.""" + def __init__(self): + self.event = threading.Event() + self.finished_futures = [] + + def add_result(self, future): + self.finished_futures.append(future) + + def add_exception(self, future): + self.finished_futures.append(future) + + def add_cancelled(self, future): + self.finished_futures.append(future) + +class _AsCompletedWaiter(_Waiter): + """Used by as_completed().""" + + def __init__(self): + super(_AsCompletedWaiter, self).__init__() + self.lock = threading.Lock() + + def add_result(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_result(future) + self.event.set() + + def add_exception(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_exception(future) + self.event.set() + + def add_cancelled(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_cancelled(future) + self.event.set() + +class _FirstCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_COMPLETED).""" + + def add_result(self, future): + super().add_result(future) + self.event.set() + + def add_exception(self, future): + super().add_exception(future) + self.event.set() + + def add_cancelled(self, future): + super().add_cancelled(future) + self.event.set() + +class _AllCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED).""" + + def __init__(self, num_pending_calls, stop_on_exception): + self.num_pending_calls = num_pending_calls + self.stop_on_exception = stop_on_exception + self.lock = threading.Lock() + super().__init__() + + def 
_decrement_pending_calls(self): + with self.lock: + self.num_pending_calls -= 1 + if not self.num_pending_calls: + self.event.set() + + def add_result(self, future): + super().add_result(future) + self._decrement_pending_calls() + + def add_exception(self, future): + super().add_exception(future) + if self.stop_on_exception: + self.event.set() + else: + self._decrement_pending_calls() + + def add_cancelled(self, future): + super().add_cancelled(future) + self._decrement_pending_calls() + +class _AcquireFutures(object): + """A context manager that does an ordered acquire of Future conditions.""" + + def __init__(self, futures): + self.futures = sorted(futures, key=id) + + def __enter__(self): + for future in self.futures: + future._condition.acquire() + + def __exit__(self, *args): + for future in self.futures: + future._condition.release() + +def _create_and_install_waiters(fs, return_when): + if return_when == _AS_COMPLETED: + waiter = _AsCompletedWaiter() + elif return_when == FIRST_COMPLETED: + waiter = _FirstCompletedWaiter() + else: + pending_count = sum( + f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs) + + if return_when == FIRST_EXCEPTION: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True) + elif return_when == ALL_COMPLETED: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False) + else: + raise ValueError("Invalid return condition: %r" % return_when) + + for f in fs: + f._waiters.append(waiter) + + return waiter + +def as_completed(fs, timeout=None): + """An iterator over the given futures that yields each as it completes. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + iterate over. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + + Returns: + An iterator that yields the given Futures as they complete (finished or + cancelled). If any given Futures are duplicated, they will be returned + once. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + """ + if timeout is not None: + end_time = timeout + time.time() + + fs = set(fs) + with _AcquireFutures(fs): + finished = set( + f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) + pending = fs - finished + waiter = _create_and_install_waiters(fs, _AS_COMPLETED) + + try: + yield from finished + + while pending: + if timeout is None: + wait_timeout = None + else: + wait_timeout = end_time - time.time() + if wait_timeout < 0: + raise TimeoutError( + '%d (of %d) futures unfinished' % ( + len(pending), len(fs))) + + waiter.event.wait(wait_timeout) + + with waiter.lock: + finished = waiter.finished_futures + waiter.finished_futures = [] + waiter.event.clear() + + for future in finished: + yield future + pending.remove(future) + + finally: + for f in fs: + with f._condition: + f._waiters.remove(waiter) + +DoneAndNotDoneFutures = collections.namedtuple( + 'DoneAndNotDoneFutures', 'done not_done') +def wait(fs, timeout=None, return_when=ALL_COMPLETED): + """Wait for the futures in the given sequence to complete. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + wait upon. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + return_when: Indicates when this function should return. The options + are: + + FIRST_COMPLETED - Return when any future finishes or is + cancelled. 
+                FIRST_EXCEPTION - Return when any future finishes by raising an
+                                  exception. If no future raises an exception
+                                  then it is equivalent to ALL_COMPLETED.
+                ALL_COMPLETED - Return when all futures finish or are cancelled.
+
+    Returns:
+        A named 2-tuple of sets. The first set, named 'done', contains the
+        futures that completed (finished or cancelled) before the wait
+        completed. The second set, named 'not_done', contains uncompleted
+        futures.
+    """
+    with _AcquireFutures(fs):
+        done = set(f for f in fs
+                   if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
+        not_done = set(fs) - done
+
+        if (return_when == FIRST_COMPLETED) and done:
+            return DoneAndNotDoneFutures(done, not_done)
+        elif (return_when == FIRST_EXCEPTION) and done:
+            if any(f for f in done
+                   if not f.cancelled() and f.exception() is not None):
+                return DoneAndNotDoneFutures(done, not_done)
+
+        if len(done) == len(fs):
+            return DoneAndNotDoneFutures(done, not_done)
+
+        waiter = _create_and_install_waiters(fs, return_when)
+
+    waiter.event.wait(timeout)
+    for f in fs:
+        with f._condition:
+            f._waiters.remove(waiter)
+
+    done.update(waiter.finished_futures)
+    return DoneAndNotDoneFutures(done, set(fs) - done)
+
+class Future(object):
+    """Represents the result of an asynchronous computation."""
+
+    def __init__(self):
+        """Initializes the future. Should not be called by clients."""
+        self._condition = threading.Condition()
+        self._state = PENDING
+        self._result = None
+        self._exception = None
+        self._waiters = []
+        self._done_callbacks = []
+
+    def _invoke_callbacks(self):
+        for callback in self._done_callbacks:
+            try:
+                callback(self)
+            except Exception:
+                LOGGER.exception('exception calling callback for %r', self)
+
+    def __repr__(self):
+        with self._condition:
+            if self._state == FINISHED:
+                if self._exception:
+                    return '<%s at %#x state=%s raised %s>' % (
+                        self.__class__.__name__,
+                        id(self),
+                        _STATE_TO_DESCRIPTION_MAP[self._state],
+                        self._exception.__class__.__name__)
+                else:
+                    return '<%s at %#x state=%s returned %s>' % (
+                        self.__class__.__name__,
+                        id(self),
+                        _STATE_TO_DESCRIPTION_MAP[self._state],
+                        self._result.__class__.__name__)
+            return '<%s at %#x state=%s>' % (
+                self.__class__.__name__,
+                id(self),
+                _STATE_TO_DESCRIPTION_MAP[self._state])
+
+    def cancel(self):
+        """Cancel the future if possible.
+
+        Returns True if the future was cancelled, False otherwise. A future
+        cannot be cancelled if it is running or has already completed.
+        """
+        with self._condition:
+            if self._state in [RUNNING, FINISHED]:
+                return False
+
+            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
+                return True
+
+            self._state = CANCELLED
+            self._condition.notify_all()
+
+        self._invoke_callbacks()
+        return True
+
+    def cancelled(self):
+        """Return True if the future was cancelled."""
+        with self._condition:
+            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
+
+    def running(self):
+        """Return True if the future is currently executing."""
+        with self._condition:
+            return self._state == RUNNING
+
+    def done(self):
+        """Return True if the future was cancelled or finished executing."""
+        with self._condition:
+            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
+
+    def __get_result(self):
+        if self._exception:
+            raise self._exception
+        else:
+            return self._result
+
+    def add_done_callback(self, fn):
+        """Attaches a callable that will be called when the future finishes.
+
+        Args:
+            fn: A callable that will be called with this future as its only
+                argument when the future completes or is cancelled.
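A sketch tying the primitives above together: wait() with a return_when condition plus Future inspection. It assumes ThreadPoolExecutor, which this diff adds further down, and `slow_square` is a hypothetical workload:

    import time
    from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED

    def slow_square(n):
        time.sleep(0.1 * n)  # stand-in for real work
        return n * n

    with ThreadPoolExecutor(max_workers=2) as pool:
        futures = [pool.submit(slow_square, n) for n in range(4)]

        # Block until at least one future finishes or is cancelled.
        done, not_done = wait(futures, return_when=FIRST_COMPLETED)
        for f in done:
            print(f.done(), f.result())  # True, and the computed value
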
The callable + will always be called by a thread in the same process in which + it was added. If the future has already completed or been + cancelled then the callable will be called immediately. These + callables are called in the order that they were added. + """ + with self._condition: + if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]: + self._done_callbacks.append(fn) + return + fn(self) + + def result(self, timeout=None): + """Return the result of the call that the future represents. + + Args: + timeout: The number of seconds to wait for the result if the future + isn't done. If None, then there is no limit on the wait time. + + Returns: + The result of the call that the future represents. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. + Exception: If the call raised then that exception will be raised. + """ + with self._condition: + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self.__get_result() + + self._condition.wait(timeout) + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self.__get_result() + else: + raise TimeoutError() + + def exception(self, timeout=None): + """Return the exception raised by the call that the future represents. + + Args: + timeout: The number of seconds to wait for the exception if the + future isn't done. If None, then there is no limit on the wait + time. + + Returns: + The exception raised by the call that the future represents or None + if the call completed without raising. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. + """ + + with self._condition: + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self._exception + + self._condition.wait(timeout) + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self._exception + else: + raise TimeoutError() + + # The following methods should only be used by Executors and in tests. + def set_running_or_notify_cancel(self): + """Mark the future as running or process any cancel notifications. + + Should only be used by Executor implementations and unit tests. + + If the future has been cancelled (cancel() was called and returned + True) then any threads waiting on the future completing (though calls + to as_completed() or wait()) are notified and False is returned. + + If the future was not cancelled then it is put in the running state + (future calls to running() will return True) and True is returned. + + This method should be called by Executor implementations before + executing the work associated with this future. If this method returns + False then the work should not be executed. + + Returns: + False if the Future was cancelled, True otherwise. + + Raises: + RuntimeError: if this method was already called or if set_result() + or set_exception() was called. + """ + with self._condition: + if self._state == CANCELLED: + self._state = CANCELLED_AND_NOTIFIED + for waiter in self._waiters: + waiter.add_cancelled(self) + # self._condition.notify_all() is not necessary because + # self.cancel() triggers a notification. 
+ return False + elif self._state == PENDING: + self._state = RUNNING + return True + else: + LOGGER.critical('Future %s in unexpected state: %s', + id(self), + self._state) + raise RuntimeError('Future in unexpected state') + + def set_result(self, result): + """Sets the return value of work associated with the future. + + Should only be used by Executor implementations and unit tests. + """ + with self._condition: + self._result = result + self._state = FINISHED + for waiter in self._waiters: + waiter.add_result(self) + self._condition.notify_all() + self._invoke_callbacks() + + def set_exception(self, exception): + """Sets the result of the future as being the given exception. + + Should only be used by Executor implementations and unit tests. + """ + with self._condition: + self._exception = exception + self._state = FINISHED + for waiter in self._waiters: + waiter.add_exception(self) + self._condition.notify_all() + self._invoke_callbacks() + +class Executor(object): + """This is an abstract base class for concrete asynchronous executors.""" + + def submit(self, fn, *args, **kwargs): + """Submits a callable to be executed with the given arguments. + + Schedules the callable to be executed as fn(*args, **kwargs) and returns + a Future instance representing the execution of the callable. + + Returns: + A Future representing the given call. + """ + raise NotImplementedError() + + def map(self, fn, *iterables, timeout=None, chunksize=1): + """Returns an iterator equivalent to map(fn, iter). + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + chunksize: The size of the chunks the iterable will be broken into + before being passed to a child process. This argument is only + used by ProcessPoolExecutor; it is ignored by + ThreadPoolExecutor. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. + """ + if timeout is not None: + end_time = timeout + time.time() + + fs = [self.submit(fn, *args) for args in zip(*iterables)] + + # Yield must be hidden in closure so that the futures are submitted + # before the first iterator value is required. + def result_iterator(): + try: + for future in fs: + if timeout is None: + yield future.result() + else: + yield future.result(end_time - time.time()) + finally: + for future in fs: + future.cancel() + return result_iterator() + + def shutdown(self, wait=True): + """Clean-up the resources associated with the Executor. + + It is safe to call this method several times. Otherwise, no other + methods can be called after this one. + + Args: + wait: If True then shutdown will not return until all running + futures have finished executing and the resources used by the + executor have been reclaimed. + """ + pass + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.shutdown(wait=True) + return False diff --git a/Lib/concurrent/futures/process.py b/Lib/concurrent/futures/process.py new file mode 100644 index 0000000000..8f1d714193 --- /dev/null +++ b/Lib/concurrent/futures/process.py @@ -0,0 +1,503 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ProcessPoolExecutor. 
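Before the data-flow description, a quick sketch of how ProcessPoolExecutor is meant to be driven; `cpu_bound` is a hypothetical workload, and the __main__ guard matters because the workers are separate processes:

    from concurrent.futures import ProcessPoolExecutor

    def cpu_bound(n):
        # Hypothetical CPU-heavy workload.
        return sum(i * i for i in range(n))

    if __name__ == "__main__":
        # map() chops the inputs into chunksize-sized batches, one
        # submission per batch, and yields results in input order.
        with ProcessPoolExecutor(max_workers=2) as pool:
            for total in pool.map(cpu_bound, [10000, 200000], chunksize=1):
                print(total)
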
+ +The follow diagram and text describe the data-flow through the system: + +|======================= In-process =====================|== Out-of-process ==| + ++----------+ +----------+ +--------+ +-----------+ +---------+ +| | => | Work Ids | => | | => | Call Q | => | | +| | +----------+ | | +-----------+ | | +| | | ... | | | | ... | | | +| | | 6 | | | | 5, call() | | | +| | | 7 | | | | ... | | | +| Process | | ... | | Local | +-----------+ | Process | +| Pool | +----------+ | Worker | | #1..n | +| Executor | | Thread | | | +| | +----------- + | | +-----------+ | | +| | <=> | Work Items | <=> | | <= | Result Q | <= | | +| | +------------+ | | +-----------+ | | +| | | 6: call() | | | | ... | | | +| | | future | | | | 4, result | | | +| | | ... | | | | 3, except | | | ++----------+ +------------+ +--------+ +-----------+ +---------+ + +Executor.submit() called: +- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict +- adds the id of the _WorkItem to the "Work Ids" queue + +Local worker thread: +- reads work ids from the "Work Ids" queue and looks up the corresponding + WorkItem from the "Work Items" dict: if the work item has been cancelled then + it is simply removed from the dict, otherwise it is repackaged as a + _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" + until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because + calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). +- reads _ResultItems from "Result Q", updates the future stored in the + "Work Items" dict and deletes the dict entry + +Process #1..n: +- reads _CallItems from "Call Q", executes the calls, and puts the resulting + _ResultItems in "Result Q" +""" + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +import atexit +import os +from concurrent.futures import _base +import queue +from queue import Full +import multiprocessing +from multiprocessing import SimpleQueue +from multiprocessing.connection import wait +import threading +import weakref +from functools import partial +import itertools +import traceback + +# Workers are created as daemon threads and processes. This is done to allow the +# interpreter to exit when there are still idle processes in a +# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However, +# allowing workers to die with the interpreter has two undesirable properties: +# - The workers would still be running during interpreter shutdown, +# meaning that they would fail in unpredictable ways. +# - The workers could be killed while evaluating a work item, which could +# be bad if the callable being evaluated has external side-effects e.g. +# writing to a file. +# +# To work around this problem, an exit handler is installed which tells the +# workers to exit when their work queues are empty and then waits until the +# threads/processes finish. + +_threads_queues = weakref.WeakKeyDictionary() +_shutdown = False + +def _python_exit(): + global _shutdown + _shutdown = True + items = list(_threads_queues.items()) + for t, q in items: + q.put(None) + for t, q in items: + t.join() + +# Controls how many more calls than processes will be queued in the call queue. +# A smaller number will mean that processes spend more time idle waiting for +# work while a larger number will make Future.cancel() succeed less frequently +# (Futures in the call queue cannot be cancelled). 
+EXTRA_QUEUED_CALLS = 1 + +# Hack to embed stringification of remote traceback in local traceback + +class _RemoteTraceback(Exception): + def __init__(self, tb): + self.tb = tb + def __str__(self): + return self.tb + +class _ExceptionWithTraceback: + def __init__(self, exc, tb): + tb = traceback.format_exception(type(exc), exc, tb) + tb = ''.join(tb) + self.exc = exc + self.tb = '\n"""\n%s"""' % tb + def __reduce__(self): + return _rebuild_exc, (self.exc, self.tb) + +def _rebuild_exc(exc, tb): + exc.__cause__ = _RemoteTraceback(tb) + return exc + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + +class _ResultItem(object): + def __init__(self, work_id, exception=None, result=None): + self.work_id = work_id + self.exception = exception + self.result = result + +class _CallItem(object): + def __init__(self, work_id, fn, args, kwargs): + self.work_id = work_id + self.fn = fn + self.args = args + self.kwargs = kwargs + +def _get_chunks(*iterables, chunksize): + """ Iterates over zip()ed iterables in chunks. """ + it = zip(*iterables) + while True: + chunk = tuple(itertools.islice(it, chunksize)) + if not chunk: + return + yield chunk + +def _process_chunk(fn, chunk): + """ Processes a chunk of an iterable passed to map. + + Runs the function passed to map() on a chunk of the + iterable passed to map. + + This function is run in a separate process. + + """ + return [fn(*args) for args in chunk] + +def _process_worker(call_queue, result_queue): + """Evaluates calls from call_queue and places the results in result_queue. + + This worker is run in a separate process. + + Args: + call_queue: A multiprocessing.Queue of _CallItems that will be read and + evaluated by the worker. + result_queue: A multiprocessing.Queue of _ResultItems that will written + to by the worker. + shutdown: A multiprocessing.Event that will be set as a signal to the + worker that it should exit when call_queue is empty. + """ + while True: + call_item = call_queue.get(block=True) + if call_item is None: + # Wake up queue management thread + result_queue.put(os.getpid()) + return + try: + r = call_item.fn(*call_item.args, **call_item.kwargs) + except BaseException as e: + exc = _ExceptionWithTraceback(e, e.__traceback__) + result_queue.put(_ResultItem(call_item.work_id, exception=exc)) + else: + result_queue.put(_ResultItem(call_item.work_id, + result=r)) + +def _add_call_item_to_queue(pending_work_items, + work_ids, + call_queue): + """Fills call_queue with _WorkItems from pending_work_items. + + This function never blocks. + + Args: + pending_work_items: A dict mapping work ids to _WorkItems e.g. + {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids + are consumed and the corresponding _WorkItems from + pending_work_items are transformed into _CallItems and put in + call_queue. + call_queue: A multiprocessing.Queue that will be filled with _CallItems + derived from _WorkItems. 
+ """ + while True: + if call_queue.full(): + return + try: + work_id = work_ids.get(block=False) + except queue.Empty: + return + else: + work_item = pending_work_items[work_id] + + if work_item.future.set_running_or_notify_cancel(): + call_queue.put(_CallItem(work_id, + work_item.fn, + work_item.args, + work_item.kwargs), + block=True) + else: + del pending_work_items[work_id] + continue + +def _queue_management_worker(executor_reference, + processes, + pending_work_items, + work_ids_queue, + call_queue, + result_queue): + """Manages the communication between this process and the worker processes. + + This function is run in a local thread. + + Args: + executor_reference: A weakref.ref to the ProcessPoolExecutor that owns + this thread. Used to determine if the ProcessPoolExecutor has been + garbage collected and that this function can exit. + process: A list of the multiprocessing.Process instances used as + workers. + pending_work_items: A dict mapping work ids to _WorkItems e.g. + {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]). + call_queue: A multiprocessing.Queue that will be filled with _CallItems + derived from _WorkItems for processing by the process workers. + result_queue: A multiprocessing.Queue of _ResultItems generated by the + process workers. + """ + executor = None + + def shutting_down(): + return _shutdown or executor is None or executor._shutdown_thread + + def shutdown_worker(): + # This is an upper bound + nb_children_alive = sum(p.is_alive() for p in processes.values()) + for i in range(0, nb_children_alive): + call_queue.put_nowait(None) + # Release the queue's resources as soon as possible. + call_queue.close() + # If .join() is not called on the created processes then + # some multiprocessing.Queue methods may deadlock on Mac OS X. + for p in processes.values(): + p.join() + + reader = result_queue._reader + + while True: + _add_call_item_to_queue(pending_work_items, + work_ids_queue, + call_queue) + + sentinels = [p.sentinel for p in processes.values()] + assert sentinels + ready = wait([reader] + sentinels) + if reader in ready: + result_item = reader.recv() + else: + # Mark the process pool broken so that submits fail right now. + executor = executor_reference() + if executor is not None: + executor._broken = True + executor._shutdown_thread = True + executor = None + # All futures in flight must be marked failed + for work_id, work_item in pending_work_items.items(): + work_item.future.set_exception( + BrokenProcessPool( + "A process in the process pool was " + "terminated abruptly while the future was " + "running or pending." + )) + # Delete references to object. See issue16284 + del work_item + pending_work_items.clear() + # Terminate remaining workers forcibly: the queues or their + # locks may be in a dirty state and block forever. 
+ for p in processes.values(): + p.terminate() + shutdown_worker() + return + if isinstance(result_item, int): + # Clean shutdown of a worker using its PID + # (avoids marking the executor broken) + assert shutting_down() + p = processes.pop(result_item) + p.join() + if not processes: + shutdown_worker() + return + elif result_item is not None: + work_item = pending_work_items.pop(result_item.work_id, None) + # work_item can be None if another process terminated (see above) + if work_item is not None: + if result_item.exception: + work_item.future.set_exception(result_item.exception) + else: + work_item.future.set_result(result_item.result) + # Delete references to object. See issue16284 + del work_item + # Check whether we should start shutting down. + executor = executor_reference() + # No more work items can be added if: + # - The interpreter is shutting down OR + # - The executor that owns this worker has been collected OR + # - The executor that owns this worker has been shutdown. + if shutting_down(): + try: + # Since no new work items can be added, it is safe to shutdown + # this thread if there are no pending work items. + if not pending_work_items: + shutdown_worker() + return + except Full: + # This is not a problem: we will eventually be woken up (in + # result_queue.get()) and be able to send a sentinel again. + pass + executor = None + +_system_limits_checked = False +_system_limited = None +def _check_system_limits(): + global _system_limits_checked, _system_limited + if _system_limits_checked: + if _system_limited: + raise NotImplementedError(_system_limited) + _system_limits_checked = True + try: + nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") + except (AttributeError, ValueError): + # sysconf not available or setting not available + return + if nsems_max == -1: + # indetermined limit, assume that limit is determined + # by available memory only + return + if nsems_max >= 256: + # minimum number of semaphores available + # according to POSIX + return + _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max + raise NotImplementedError(_system_limited) + + +class BrokenProcessPool(RuntimeError): + """ + Raised when a process in a ProcessPoolExecutor terminated abruptly + while a future was in the running state. + """ + + +class ProcessPoolExecutor(_base.Executor): + def __init__(self, max_workers=None): + """Initializes a new ProcessPoolExecutor instance. + + Args: + max_workers: The maximum number of processes that can be used to + execute the given calls. If None or not given then as many + worker processes will be created as the machine has processors. + """ + _check_system_limits() + + if max_workers is None: + self._max_workers = os.cpu_count() or 1 + else: + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + + self._max_workers = max_workers + + # Make the call queue slightly larger than the number of processes to + # prevent the worker processes from idling. But don't make it too big + # because futures in the call queue cannot be cancelled. + self._call_queue = multiprocessing.Queue(self._max_workers + + EXTRA_QUEUED_CALLS) + # Killed worker processes can produce spurious "broken pipe" + # tracebacks in the queue's own worker thread. But we detect killed + # processes anyway, so silence the tracebacks. 
+ self._call_queue._ignore_epipe = True + self._result_queue = SimpleQueue() + self._work_ids = queue.Queue() + self._queue_management_thread = None + # Map of pids to processes + self._processes = {} + + # Shutdown is a two-step process. + self._shutdown_thread = False + self._shutdown_lock = threading.Lock() + self._broken = False + self._queue_count = 0 + self._pending_work_items = {} + + def _start_queue_management_thread(self): + # When the executor gets lost, the weakref callback will wake up + # the queue management thread. + def weakref_cb(_, q=self._result_queue): + q.put(None) + if self._queue_management_thread is None: + # Start the processes so that their sentinels are known. + self._adjust_process_count() + self._queue_management_thread = threading.Thread( + target=_queue_management_worker, + args=(weakref.ref(self, weakref_cb), + self._processes, + self._pending_work_items, + self._work_ids, + self._call_queue, + self._result_queue)) + self._queue_management_thread.daemon = True + self._queue_management_thread.start() + _threads_queues[self._queue_management_thread] = self._result_queue + + def _adjust_process_count(self): + for _ in range(len(self._processes), self._max_workers): + p = multiprocessing.Process( + target=_process_worker, + args=(self._call_queue, + self._result_queue)) + p.start() + self._processes[p.pid] = p + + def submit(self, fn, *args, **kwargs): + with self._shutdown_lock: + if self._broken: + raise BrokenProcessPool('A child process terminated ' + 'abruptly, the process pool is not usable anymore') + if self._shutdown_thread: + raise RuntimeError('cannot schedule new futures after shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._pending_work_items[self._queue_count] = w + self._work_ids.put(self._queue_count) + self._queue_count += 1 + # Wake up queue management thread + self._result_queue.put(None) + + self._start_queue_management_thread() + return f + submit.__doc__ = _base.Executor.submit.__doc__ + + def map(self, fn, *iterables, timeout=None, chunksize=1): + """Returns an iterator equivalent to map(fn, iter). + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + chunksize: If greater than one, the iterables will be chopped into + chunks of size chunksize and submitted to the process pool. + If set to one, the items in the list will be sent one at a time. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. + """ + if chunksize < 1: + raise ValueError("chunksize must be >= 1.") + + results = super().map(partial(_process_chunk, fn), + _get_chunks(*iterables, chunksize=chunksize), + timeout=timeout) + return itertools.chain.from_iterable(results) + + def shutdown(self, wait=True): + with self._shutdown_lock: + self._shutdown_thread = True + if self._queue_management_thread: + # Wake up queue management thread + self._result_queue.put(None) + if wait: + self._queue_management_thread.join() + # To reduce the risk of opening too many files, remove references to + # objects that use file descriptors. 
+ self._queue_management_thread = None + self._call_queue = None + self._result_queue = None + self._processes = None + shutdown.__doc__ = _base.Executor.shutdown.__doc__ + +atexit.register(_python_exit) diff --git a/Lib/concurrent/futures/thread.py b/Lib/concurrent/futures/thread.py new file mode 100644 index 0000000000..03d276b63f --- /dev/null +++ b/Lib/concurrent/futures/thread.py @@ -0,0 +1,145 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ThreadPoolExecutor.""" + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +import atexit +from concurrent.futures import _base +import queue +import threading +import weakref +import os + +# Workers are created as daemon threads. This is done to allow the interpreter +# to exit when there are still idle threads in a ThreadPoolExecutor's thread +# pool (i.e. shutdown() was not called). However, allowing workers to die with +# the interpreter has two undesirable properties: +# - The workers would still be running during interpreter shutdown, +# meaning that they would fail in unpredictable ways. +# - The workers could be killed while evaluating a work item, which could +# be bad if the callable being evaluated has external side-effects e.g. +# writing to a file. +# +# To work around this problem, an exit handler is installed which tells the +# workers to exit when their work queues are empty and then waits until the +# threads finish. + +_threads_queues = weakref.WeakKeyDictionary() +_shutdown = False + +def _python_exit(): + global _shutdown + _shutdown = True + items = list(_threads_queues.items()) + for t, q in items: + q.put(None) + for t, q in items: + t.join() + +atexit.register(_python_exit) + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + + def run(self): + if not self.future.set_running_or_notify_cancel(): + return + + try: + result = self.fn(*self.args, **self.kwargs) + except BaseException as e: + self.future.set_exception(e) + else: + self.future.set_result(result) + +def _worker(executor_reference, work_queue): + try: + while True: + work_item = work_queue.get(block=True) + if work_item is not None: + work_item.run() + # Delete references to object. See issue16284 + del work_item + continue + executor = executor_reference() + # Exit if: + # - The interpreter is shutting down OR + # - The executor that owns the worker has been collected OR + # - The executor that owns the worker has been shutdown. + if _shutdown or executor is None or executor._shutdown: + # Notice other workers + work_queue.put(None) + return + del executor + except BaseException: + _base.LOGGER.critical('Exception in worker', exc_info=True) + +class ThreadPoolExecutor(_base.Executor): + def __init__(self, max_workers=None, thread_name_prefix=''): + """Initializes a new ThreadPoolExecutor instance. + + Args: + max_workers: The maximum number of threads that can be used to + execute the given calls. + thread_name_prefix: An optional name prefix to give our threads. + """ + if max_workers is None: + # Use this number because ThreadPoolExecutor is often + # used to overlap I/O instead of CPU work. 
+ max_workers = (os.cpu_count() or 1) * 5 + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + + self._max_workers = max_workers + self._work_queue = queue.Queue() + self._threads = set() + self._shutdown = False + self._shutdown_lock = threading.Lock() + self._thread_name_prefix = thread_name_prefix + + def submit(self, fn, *args, **kwargs): + with self._shutdown_lock: + if self._shutdown: + raise RuntimeError('cannot schedule new futures after shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._work_queue.put(w) + self._adjust_thread_count() + return f + submit.__doc__ = _base.Executor.submit.__doc__ + + def _adjust_thread_count(self): + # When the executor gets lost, the weakref callback will wake up + # the worker threads. + def weakref_cb(_, q=self._work_queue): + q.put(None) + # TODO(bquinlan): Should avoid creating new threads if there are more + # idle threads than items in the work queue. + num_threads = len(self._threads) + if num_threads < self._max_workers: + thread_name = '%s_%d' % (self._thread_name_prefix or self, + num_threads) + t = threading.Thread(name=thread_name, target=_worker, + args=(weakref.ref(self, weakref_cb), + self._work_queue)) + t.daemon = True + t.start() + self._threads.add(t) + _threads_queues[t] = self._work_queue + + def shutdown(self, wait=True): + with self._shutdown_lock: + self._shutdown = True + self._work_queue.put(None) + if wait: + for t in self._threads: + t.join() + shutdown.__doc__ = _base.Executor.shutdown.__doc__ diff --git a/Lib/configparser.py b/Lib/configparser.py new file mode 100644 index 0000000000..af5aca1fea --- /dev/null +++ b/Lib/configparser.py @@ -0,0 +1,1339 @@ +"""Configuration file parser. + +A configuration file consists of sections, lead by a "[section]" header, +and followed by "name: value" entries, with continuations and such in +the style of RFC 822. + +Intrinsic defaults can be specified by passing them into the +ConfigParser constructor as a dictionary. + +class: + +ConfigParser -- responsible for parsing a list of + configuration files, and managing the parsed database. + + methods: + + __init__(defaults=None, dict_type=_default_dict, allow_no_value=False, + delimiters=('=', ':'), comment_prefixes=('#', ';'), + inline_comment_prefixes=None, strict=True, + empty_lines_in_values=True, default_section='DEFAULT', + interpolation=, converters=): + Create the parser. When `defaults' is given, it is initialized into the + dictionary or intrinsic defaults. The keys must be strings, the values + must be appropriate for %()s string interpolation. + + When `dict_type' is given, it will be used to create the dictionary + objects for the list of sections, for the options within a section, and + for the default values. + + When `delimiters' is given, it will be used as the set of substrings + that divide keys from values. + + When `comment_prefixes' is given, it will be used as the set of + substrings that prefix comments in empty lines. Comments can be + indented. + + When `inline_comment_prefixes' is given, it will be used as the set of + substrings that prefix comments in non-empty lines. + + When `strict` is True, the parser won't allow for any section or option + duplicates while reading from a single source (file, string or + dictionary). Default is True. + + When `empty_lines_in_values' is False (default: True), each empty line + marks the end of an option. Otherwise, internal empty lines of + a multiline option are kept as part of the value. 
+ + When `allow_no_value' is True (default: False), options without + values are accepted; the value presented for these is None. + + When `default_section' is given, the special section is + named accordingly. By default it is called ``"DEFAULT"`` but this can + be customized to point to any other valid section name. Its current + value can be retrieved using the ``parser_instance.default_section`` + attribute and may be modified at runtime. + + When `interpolation` is given, it should be an Interpolation subclass + instance. It will be used as the handler for option value + pre-processing when using getters. RawConfigParser objects don't do + any sort of interpolation, whereas ConfigParser uses an instance of + BasicInterpolation. The library also provides a ``zc.buildout`` + inspired ExtendedInterpolation implementation. + + When `converters` is given, it should be a dictionary where each key + represents the name of a type converter and each value is a callable + implementing the conversion from string to the desired datatype. Every + converter gets its corresponding get*() method on the parser object and + section proxies. + + sections() + Return all the configuration section names, sans DEFAULT. + + has_section(section) + Return whether the given section exists. + + has_option(section, option) + Return whether the given option exists in the given section. + + options(section) + Return list of configuration options for the named section. + + read(filenames, encoding=None) + Read and parse the list of named configuration files, given by + name. A single filename is also allowed. Non-existing files + are ignored. Return list of successfully read files. + + read_file(f, filename=None) + Read and parse one configuration file, given as a file object. + The filename defaults to f.name; it is only used in error + messages (if f has no `name' attribute, the string `<???>' is used). + + read_string(string) + Read configuration from a given string. + + read_dict(dictionary) + Read configuration from a dictionary. Keys are section names, + values are dictionaries with keys and values that should be present + in the section. If the used dictionary type preserves order, sections + and their keys will be added in order. Values are automatically + converted to strings. + + get(section, option, raw=False, vars=None, fallback=_UNSET) + Return a string value for the named option. All % interpolations are + expanded in the return values, based on the defaults passed into the + constructor and the DEFAULT section. Additional substitutions may be + provided using the `vars' argument, which must be a dictionary whose + contents override any pre-existing defaults. If `option' is a key in + `vars', the value from `vars' is used. + + getint(section, option, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to an integer. + + getfloat(section, option, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to a float. + + getboolean(section, option, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to a boolean (currently case + insensitively defined as 0, false, no, off for False, and 1, true, + yes, on for True). Returns False or True. + + items(section=_UNSET, raw=False, vars=None) + If section is given, return a list of tuples with (name, value) for + each option in the section. Otherwise, return a list of tuples with + (section_name, section_proxy) for each section, including DEFAULTSECT. 
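+
+    A minimal usage sketch of the getters described above (section,
+    option names and values here are illustrative):
+
+        parser = ConfigParser()
+        parser.read_string("[server]\nport = 8080\n")
+        parser.get('server', 'port')                     # '8080'
+        parser.getint('server', 'port')                  # 8080
+        parser.getint('server', 'timeout', fallback=30)  # 30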
+ + remove_section(section) + Remove the given file section and all its options. + + remove_option(section, option) + Remove the given option from the given section. + + set(section, option, value) + Set the given option. + + write(fp, space_around_delimiters=True) + Write the configuration state in .ini format. If + `space_around_delimiters' is True (the default), delimiters + between keys and values are surrounded by spaces. +""" + +from collections.abc import MutableMapping +from collections import OrderedDict as _default_dict, ChainMap as _ChainMap +import functools +import io +import itertools +import re +import sys +import warnings + +__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError", + "NoOptionError", "InterpolationError", "InterpolationDepthError", + "InterpolationMissingOptionError", "InterpolationSyntaxError", + "ParsingError", "MissingSectionHeaderError", + "ConfigParser", "SafeConfigParser", "RawConfigParser", + "Interpolation", "BasicInterpolation", "ExtendedInterpolation", + "LegacyInterpolation", "SectionProxy", "ConverterMapping", + "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] + +DEFAULTSECT = "DEFAULT" + +MAX_INTERPOLATION_DEPTH = 10 + + + +# exception classes +class Error(Exception): + """Base class for ConfigParser exceptions.""" + + def __init__(self, msg=''): + self.message = msg + Exception.__init__(self, msg) + + def __repr__(self): + return self.message + + __str__ = __repr__ + + +class NoSectionError(Error): + """Raised when no section matches a requested option.""" + + def __init__(self, section): + Error.__init__(self, 'No section: %r' % (section,)) + self.section = section + self.args = (section, ) + + +class DuplicateSectionError(Error): + """Raised when a section is repeated in an input source. + + Possible repetitions that raise this exception are: multiple creation + using the API or in strict parsers when a section is found more than once + in a single input file, string or dictionary. + """ + + def __init__(self, section, source=None, lineno=None): + msg = [repr(section), " already exists"] + if source is not None: + message = ["While reading from ", repr(source)] + if lineno is not None: + message.append(" [line {0:2d}]".format(lineno)) + message.append(": section ") + message.extend(msg) + msg = message + else: + msg.insert(0, "Section ") + Error.__init__(self, "".join(msg)) + self.section = section + self.source = source + self.lineno = lineno + self.args = (section, source, lineno) + + +class DuplicateOptionError(Error): + """Raised by strict parsers when an option is repeated in an input source. + + Current implementation raises this exception only when an option is found + more than once in a single file, string or dictionary. 
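+
+    For example, a strict parser reading the following snippet (an
+    illustrative file) raises this error, because option `x' appears
+    twice in section `a':
+
+        [a]
+        x = 1
+        x = 2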
+ """ + + def __init__(self, section, option, source=None, lineno=None): + msg = [repr(option), " in section ", repr(section), + " already exists"] + if source is not None: + message = ["While reading from ", repr(source)] + if lineno is not None: + message.append(" [line {0:2d}]".format(lineno)) + message.append(": option ") + message.extend(msg) + msg = message + else: + msg.insert(0, "Option ") + Error.__init__(self, "".join(msg)) + self.section = section + self.option = option + self.source = source + self.lineno = lineno + self.args = (section, option, source, lineno) + + +class NoOptionError(Error): + """A requested option was not found.""" + + def __init__(self, option, section): + Error.__init__(self, "No option %r in section: %r" % + (option, section)) + self.option = option + self.section = section + self.args = (option, section) + + +class InterpolationError(Error): + """Base class for interpolation-related exceptions.""" + + def __init__(self, option, section, msg): + Error.__init__(self, msg) + self.option = option + self.section = section + self.args = (option, section, msg) + + +class InterpolationMissingOptionError(InterpolationError): + """A string substitution required a setting which was not available.""" + + def __init__(self, option, section, rawval, reference): + msg = ("Bad value substitution: option {!r} in section {!r} contains " + "an interpolation key {!r} which is not a valid option name. " + "Raw value: {!r}".format(option, section, reference, rawval)) + InterpolationError.__init__(self, option, section, msg) + self.reference = reference + self.args = (option, section, rawval, reference) + + +class InterpolationSyntaxError(InterpolationError): + """Raised when the source text contains invalid syntax. + + Current implementation raises this exception when the source text into + which substitutions are made does not conform to the required syntax. + """ + + +class InterpolationDepthError(InterpolationError): + """Raised when substitutions are nested too deeply.""" + + def __init__(self, option, section, rawval): + msg = ("Recursion limit exceeded in value substitution: option {!r} " + "in section {!r} contains an interpolation key which " + "cannot be substituted in {} steps. Raw value: {!r}" + "".format(option, section, MAX_INTERPOLATION_DEPTH, + rawval)) + InterpolationError.__init__(self, option, section, msg) + self.args = (option, section, rawval) + + +class ParsingError(Error): + """Raised when a configuration file does not follow legal syntax.""" + + def __init__(self, source=None, filename=None): + # Exactly one of `source'/`filename' arguments has to be given. + # `filename' kept for compatibility. + if filename and source: + raise ValueError("Cannot specify both `filename' and `source'. " + "Use `source'.") + elif not filename and not source: + raise ValueError("Required argument `source' not given.") + elif filename: + source = filename + Error.__init__(self, 'Source contains parsing errors: %r' % source) + self.source = source + self.errors = [] + self.args = (source, ) + + @property + def filename(self): + """Deprecated, use `source'.""" + warnings.warn( + "The 'filename' attribute will be removed in future versions. " + "Use 'source' instead.", + DeprecationWarning, stacklevel=2 + ) + return self.source + + @filename.setter + def filename(self, value): + """Deprecated, user `source'.""" + warnings.warn( + "The 'filename' attribute will be removed in future versions. 
" + "Use 'source' instead.", + DeprecationWarning, stacklevel=2 + ) + self.source = value + + def append(self, lineno, line): + self.errors.append((lineno, line)) + self.message += '\n\t[line %2d]: %s' % (lineno, line) + + +class MissingSectionHeaderError(ParsingError): + """Raised when a key-value pair is found before any section header.""" + + def __init__(self, filename, lineno, line): + Error.__init__( + self, + 'File contains no section headers.\nfile: %r, line: %d\n%r' % + (filename, lineno, line)) + self.source = filename + self.lineno = lineno + self.line = line + self.args = (filename, lineno, line) + + +# Used in parser getters to indicate the default behaviour when a specific +# option is not found it to raise an exception. Created to enable `None' as +# a valid fallback value. +_UNSET = object() + + +class Interpolation: + """Dummy interpolation that passes the value through with no changes.""" + + def before_get(self, parser, section, option, value, defaults): + return value + + def before_set(self, parser, section, option, value): + return value + + def before_read(self, parser, section, option, value): + return value + + def before_write(self, parser, section, option, value): + return value + + +class BasicInterpolation(Interpolation): + """Interpolation as implemented in the classic ConfigParser. + + The option values can contain format strings which refer to other values in + the same section, or values in the special default section. + + For example: + + something: %(dir)s/whatever + + would resolve the "%(dir)s" to the value of dir. All reference + expansions are done late, on demand. If a user needs to use a bare % in + a configuration file, she can escape it by writing %%. Other % usage + is considered a user error and raises `InterpolationSyntaxError'.""" + + _KEYCRE = re.compile(r"%\(([^)]+)\)s") + + def before_get(self, parser, section, option, value, defaults): + L = [] + self._interpolate_some(parser, option, L, value, section, defaults, 1) + return ''.join(L) + + def before_set(self, parser, section, option, value): + tmp_value = value.replace('%%', '') # escaped percent signs + tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax + if '%' in tmp_value: + raise ValueError("invalid interpolation syntax in %r at " + "position %d" % (value, tmp_value.find('%'))) + return value + + def _interpolate_some(self, parser, option, accum, rest, section, map, + depth): + rawval = parser.get(section, option, raw=True, fallback=rest) + if depth > MAX_INTERPOLATION_DEPTH: + raise InterpolationDepthError(option, section, rawval) + while rest: + p = rest.find("%") + if p < 0: + accum.append(rest) + return + if p > 0: + accum.append(rest[:p]) + rest = rest[p:] + # p is no longer used + c = rest[1:2] + if c == "%": + accum.append("%") + rest = rest[2:] + elif c == "(": + m = self._KEYCRE.match(rest) + if m is None: + raise InterpolationSyntaxError(option, section, + "bad interpolation variable reference %r" % rest) + var = parser.optionxform(m.group(1)) + rest = rest[m.end():] + try: + v = map[var] + except KeyError: + raise InterpolationMissingOptionError( + option, section, rawval, var) from None + if "%" in v: + self._interpolate_some(parser, option, accum, v, + section, map, depth + 1) + else: + accum.append(v) + else: + raise InterpolationSyntaxError( + option, section, + "'%%' must be followed by '%%' or '(', " + "found: %r" % (rest,)) + + +class ExtendedInterpolation(Interpolation): + """Advanced variant of interpolation, supports the syntax used by + `zc.buildout'. 
Enables interpolation between sections.""" + + _KEYCRE = re.compile(r"\$\{([^}]+)\}") + + def before_get(self, parser, section, option, value, defaults): + L = [] + self._interpolate_some(parser, option, L, value, section, defaults, 1) + return ''.join(L) + + def before_set(self, parser, section, option, value): + tmp_value = value.replace('$$', '') # escaped dollar signs + tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax + if '$' in tmp_value: + raise ValueError("invalid interpolation syntax in %r at " + "position %d" % (value, tmp_value.find('$'))) + return value + + def _interpolate_some(self, parser, option, accum, rest, section, map, + depth): + rawval = parser.get(section, option, raw=True, fallback=rest) + if depth > MAX_INTERPOLATION_DEPTH: + raise InterpolationDepthError(option, section, rawval) + while rest: + p = rest.find("$") + if p < 0: + accum.append(rest) + return + if p > 0: + accum.append(rest[:p]) + rest = rest[p:] + # p is no longer used + c = rest[1:2] + if c == "$": + accum.append("$") + rest = rest[2:] + elif c == "{": + m = self._KEYCRE.match(rest) + if m is None: + raise InterpolationSyntaxError(option, section, + "bad interpolation variable reference %r" % rest) + path = m.group(1).split(':') + rest = rest[m.end():] + sect = section + opt = option + try: + if len(path) == 1: + opt = parser.optionxform(path[0]) + v = map[opt] + elif len(path) == 2: + sect = path[0] + opt = parser.optionxform(path[1]) + v = parser.get(sect, opt, raw=True) + else: + raise InterpolationSyntaxError( + option, section, + "More than one ':' found: %r" % (rest,)) + except (KeyError, NoSectionError, NoOptionError): + raise InterpolationMissingOptionError( + option, section, rawval, ":".join(path)) from None + if "$" in v: + self._interpolate_some(parser, opt, accum, v, sect, + dict(parser.items(sect, raw=True)), + depth + 1) + else: + accum.append(v) + else: + raise InterpolationSyntaxError( + option, section, + "'$' must be followed by '$' or '{', " + "found: %r" % (rest,)) + + +class LegacyInterpolation(Interpolation): + """Deprecated interpolation used in old versions of ConfigParser. + Use BasicInterpolation or ExtendedInterpolation instead.""" + + _KEYCRE = re.compile(r"%\(([^)]*)\)s|.") + + def before_get(self, parser, section, option, value, vars): + rawval = value + depth = MAX_INTERPOLATION_DEPTH + while depth: # Loop through this until it's done + depth -= 1 + if value and "%(" in value: + replace = functools.partial(self._interpolation_replace, + parser=parser) + value = self._KEYCRE.sub(replace, value) + try: + value = value % vars + except KeyError as e: + raise InterpolationMissingOptionError( + option, section, rawval, e.args[0]) from None + else: + break + if value and "%(" in value: + raise InterpolationDepthError(option, section, rawval) + return value + + def before_set(self, parser, section, option, value): + return value + + @staticmethod + def _interpolation_replace(match, parser): + s = match.group(1) + if s is None: + return match.group() + else: + return "%%(%s)s" % parser.optionxform(s) + + +class RawConfigParser(MutableMapping): + """ConfigParser that does not do interpolation.""" + + # Regular expressions for parsing section headers and options + _SECT_TMPL = r""" + \[ # [ + (?P
<header>[^]]+) # very permissive! + \] # ] + """ + _OPT_TMPL = r""" + (?P