diff --git a/.travis.yml b/.travis.yml index f76f56010..ae754f419 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,42 +1,11 @@ -language: generic +version: ~> 1.0 -dist: precise +language: generic -matrix: - include: - - os: osx - osx_image: xcode8 - - os: osx - osx_image: xcode8 - env: SYMLINK_INSTALL=true - - os: linux - sudo: false - - os: linux - sudo: false - env: SYMLINK_INSTALL=true +dist: xenial install: -- | - if [[ ${MASON_PLATFORM:-unset} == 'unset' ]] || [[ ${MASON_PLATFORM} == 'osx' ]] || [[ ${MASON_PLATFORM} == 'linux' ]]; then - # If platform is Linux or OSX, we might still be cross compiling if the - # MASON_PLATFORM_VERSION is not the same as the host platform. - if [[ ${MASON_PLATFORM_VERSION} == `uname -m` ]] || [[ -z ${MASON_PLATFORM_VERSION} ]]; then - echo "Installing clang for OSX/Linux via 'install' target" - source utils/toolchain.sh - which ${CC} - ${CC} -v - which ${CXX} - ${CXX} -v - fi - fi + - source utils/toolchain.sh script: -- | - if [[ ${SYMLINK_INSTALL:-} == true ]]; then - mkdir -p custom/path - ln -s $(pwd)/mason $(pwd)/custom/path/mason - export PATH=$(pwd)/custom/path:${PATH} - MASON_CUSTOM_PATH=true ./test/all.sh - else - ./test/all.sh; - fi + - if [ ! ${MASON_NAME:-} ]; then ./test/all.sh; fi; diff --git a/CHANGELOG.md b/CHANGELOG.md index a47067acf..061846e16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,113 @@ # Mason releases +## 0.23.0 + - LLVM 11 now available + - Boost 1.75.0 now available + +## 0.22.0 + + - Binaries now compiled with clang++ 10.0.0 + - Lots of new packages, too many to list here anymore. + +Changes: https://github.com/mapbox/mason/compare/v0.21.0...v0.22.0 + +## 0.21.0 + + - Fixes for curl downloads + - http -> https fixes + - Lots of new packages, too many to list here anymore. 
+ +Changes: https://github.com/mapbox/mason/compare/v0.20.0...v0.21.0 + +## 0.20.0 + +- Added + - boost 1.67.0 + - boost_libregex_icu58 1.66.0 + - icu 58.1-brkitr (+ BreakIterator), 63.1-min-static-data-1 + - libosmium 2.15.0 + - osmium-tool 1.10.0 + - postgres 10.3 (with libedit support) + - swiftshader 2018-10-08-3b5e426 + - tippecanoe 1.32.10 + - vector-tile 1.0.3 + +- Changed + - alpine linux install compatibility fixes [c7ea656](https://github.com/mapbox/mason/commit/c7ea6566e6503e4b6c5e19cb53af08445c931687) [f6ab273](https://github.com/mapbox/mason/commit/f6ab273d30f210147a3f44e185f7959e65640f8d) + - harfbuzz 1.4.4 - clear environment before building ragel [#393](https://github.com/mapbox/mason/pull/393) h/t @edkimmel + +Changes: https://github.com/mapbox/mason/compare/v0.19.0...v0.20.0 + +## 0.19.0 + +- Added + - LICENSE.md + - apitrace 2018-05-16-7fadfba + - benchmark 1.4.1 + - binutils 2.31 + - build2 0.8.0 + - catch 2.4.0 + - clang++ 6.0.1 + - clang-format 6.0.1 + - clang-tidy 6.0.0, 6.0.1, 7.0.0 + - crosstool-ng 1.23.0 + - geojson 0.4.3 + - geojsonvt 6.4.0, 6.5.0, 6.5.1, 6.6.0, 6.6.1, 6.6.2, 6.6.3 + - geometry 0.9.3, 1.0.0 + - glfw 2018-06-27-0be4f3f + - gtest 1.8.0_1 + - gzip-hpp 0.1.0, a4cfa6a638de351d26834cf2fea373693cdaa927, bb80aac + - icu 63.1-min-static-data + - include-what-you-use 6.0.1 + - jemalloc 39b1b20, 5.1.0 + - jni.hpp 4.0.0, 4.0.1 + - kcov 34, 894e98b + - kdbush 0.1.3 + - libdeflate 1.0, e9d1014 + - libosmium 2.14.0, 2.14.1, 2.14.2 + - lldb 6.0.1 + - llnode 1.7.1 + - llvm-cov 6.0.1 + - llvm 6.0.1 + - lz4 1.8.2 + - mapnik 3.0.19, 3.0.20, 3.0.21, 434511c, da69fdf66 + - mbgl-core 20f880e + - node 6.14.3, 8.11.3 + - nunicode 1.8 + - osmium-tool 1.9.0, 1.9.1, 336eb45 + - perf 4.15.15, 4.16 + - protobuf 3.5.0, 3.5.1 + - protozero 1.6.2, 1.6.3, 1.6.4 + - sqlite 3.24.0-min-size + - supercluster 0.3.0, 0.3.2 + - swiftshader 2018-05-31, 2018-06-29-539468c, 60f8662 + - tippecanoe 1.27.14, 1.27.7, 1.27.9, 1.31.0 + - valhalla 2.4.9 + - vector-tile 
1.0.2 + - vtzero 088ec09, 1.0.0, 1.0.1, 1.0.2, 1.0.3, 2915725 + - zlib-cloudflare e55212b + - zlib-ng 013b23b + +- Changed + - bzip2 1.0.6 + - clang++ 6.0.0 + - clang++ 7.0.0 + - clang-format 6.0.0 + - clang-format 7.0.0 + - clang-tidy 6.0.0 + - clang-tidy 7.0.0 + - include-what-you-use 6.0.0 + - include-what-you-use 7.0.0 + - lldb 6.0.0 + - lldb 7.0.0 + - llvm-cov 6.0.0 + - llvm-cov 7.0.0 + - llvm 6.0.0 + - llvm 7.0.0 + - openssl 1.0.2d + +Changes: https://github.com/mapbox/mason/compare/v0.18.0...v0.19.0 + ## 0.18.0 - Added diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 000000000..e88c86db5 --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,23 @@ +Copyright (c) 2014-2018, Mapbox, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/README.md b/README.md index 53c27b6a1..beeeef5b5 100644 --- a/README.md +++ b/README.md @@ -1,46 +1,59 @@ # Mason -Mason can compile C/C++ code, publish packages to S3, and install those packages via binaries. +❌ **UNMAINTAINED: this project is no longer maintained and no additional development is planned. Nor is any kind of deprecation planned. Rather this code is now frozen. If you are depending on this module in any of your applications, it is recommended you remove the dependency and/or find a replacement solution** -Mason does all its work locally inside `./mason_packages` and therefore does not require privileges for, or conflict with, system-wide packages. +Mason is a cross-platform, command-line package manager for C/C++ applications. -Mason works on both OS X and Linux. +Mason is like: -[![Build Status](https://travis-ci.org/mapbox/mason.svg?branch=master)](https://travis-ci.org/mapbox/mason) +* [npm](https://github.com/npm/npm) because it installs packages in the current working directory (`./mason_packages`) rather than globally (and therefore does not require privileges for, or conflict with, system-wide packages) +* [homebrew](http://brew.sh/) because it requires no use of `sudo` to install packages +* [apt-get](https://linux.die.net/man/8/apt-get) or [yum](https://linux.die.net/man/8/yum) because it works on Linux -### Goals +Mason is unlike: -Mason is a package manager designed for developers who package standalone applications and who need complete control over dependency versions. + * all of the above... -Because Mason is developed by Mapbox the default S3 bucket and set of available packages are driven by Mapbox developers. For other developers: 1) fork Mason and ideally rename it to something like `mason-{yourorg}`, 2) configure it against your own s3 bucket, and 3) go ahead an publish your own packages to an S3 bucket of your creation. 
+ Mason is a collection of bash scripts and does not depend on any specific runtime language, such as python, node.js, or ruby. It can build and publish a single set of binaries (>= OS X 10.8 and >= Ubuntu Precise), publish header-only files, and install packages. Mason has integrations with [Travis CI](https://travis-ci.com) and [Amazon S3](https://aws.amazon.com/s3) for automated build and deployment. -### Comparisons + Mason strongly prefers static libraries over shared libraries and has zero understanding of dependency trees: it leaves complete control to the developer for how packages relate. -Mason is like [npm](https://github.com/npm/npm) because it installs packages in the current working directory (`./mason_packages`) rather than globally. +Mason works on both **OS X** and **Linux**. -Mason is like [homebrew](http://brew.sh/) because it requires no use of `sudo` to install packages. +[![Build Status](https://travis-ci.com/mapbox/mason.svg?branch=master)](https://travis-ci.com/mapbox/mason) -Mason is like linux package managers like `apt-get` or `yum` because it works on linux. +# Table of Contents -Mason is unlike all of the above package managers because: - - - Mason runs on both Linux and OS X and creates a single set of binaries that work on >= OS X 10.8 and >= Ubuntu Precise (rather than building binaries per version). - - Mason strongly prefers static libraries over shared libraries - - Mason has zero understanding of dependency trees: it leaves complete control to the developer for how packages relate. - - Mason does not depend on any specific runtime language (like python, node.js or ruby). It is a just a few bash scripts. - - Mason depends on [travis.ci](https://travis-ci.org) for creating and publishing binaries. 
+- [Installation](#installation) +- [Configuration](#configuration) +- [Usage](#usage) +- [Creating a package](#creating-a-package) + - [Prerequisites](#prerequisites) + - [Getting started](#getting-started) + - [System packages](#system-packages) +- [Releasing a package](#releasing-a-package) +- [Using a package](#using-a-package) +- [Mason internals](#mason-internals) + - [Mason scripts](#mason-scripts) + - [Mason variables](#mason-variables) + - [Mason functions](#mason-functions) +- [Troubleshooting](#troubleshooting) ## Installation -There are three recommended ways to install mason: via curl, via a submodule, or via bundling `mason.cmake` +There are three recommended ways to install mason, via: + +* [Curl](#curl) +* [Submodule](#submodule) +* [mason.cmake](#masoncmake) -#### Curl install +#### Curl To install mason locally: ```sh mkdir ./mason -curl -sSfL https://github.com/mapbox/mason/archive/v0.18.0.tar.gz | tar --gunzip --extract --strip-components=1 --exclude="*md" --exclude="test*" --directory=./mason +curl -sSfL https://github.com/mapbox/mason/archive/v0.23.0.tar.gz | tar -z --extract --strip-components=1 --exclude="*md" --exclude="test*" --directory=./mason ``` Then you can use the `mason` command like: `./mason/mason install ` @@ -48,14 +61,14 @@ Then you can use the `mason` command like: `./mason/mason install ` #### Submodule -Mason can also be added a submodule to your repository. This is helpful for other contributors to get set up quickly. +Mason can also be added as a submodule to your repository. This is helpful for other contributors to get set up quickly. Optionally a convention when using submodules, is to place the submodule at a path starting with `.` to make the directory hidden to most file browsers. If you want your mason folder hidden then make sure to include the final part of the following command `.mason/` so your submodule path has the leading `.` instead of just being `mason/`. 
@@ -99,8 +112,15 @@ Then in your `CmakeLists.txt` install packages like: mason_use( VERSION HEADER_ONLY) ``` -Note: Leave out `HEADER_ONLY` if the package is a [pre-compiled library](https://github.com/mapbox/cpp/blob/master/glossary.md#precompiled-library). You can see if a package is `HEADER_ONLY` by looking inside the `script.sh` for `MASON_HEADER_ONLY=true` like https://github.com/mapbox/mason/blob/68871660b74023234fa96d482898c820a55bd4bf/scripts/geometry/0.9.0/script.sh#L5 +_Note: Leave out `HEADER_ONLY` if the package is a [precompiled library](https://github.com/mapbox/cpp/blob/master/glossary.md#precompiled-library). You can see if a package is `HEADER_ONLY` by looking inside the `script.sh` for `MASON_HEADER_ONLY=true` like https://github.com/mapbox/mason/blob/68871660b74023234fa96d482898c820a55bd4bf/scripts/geometry/0.9.0/script.sh#L5_ + +## Configuration +By default Mason publishes packages to a Mapbox-managed S3 bucket. If you want to publish to a different bucket we recommend taking the following steps: + +1. Fork Mason and rename it to `mason-{your_org}` +2. Set [`MASON_BUCKET`](https://github.com/mapbox/mason/blob/2765e4ab50ca2c1865048e8403ef28b696228f7b/mason.sh#L6) to your own S3 bucket +3. 
Publish packages to the new location ## Usage @@ -112,18 +132,18 @@ mason The `command` can be one of the following -* `install`: Installs the specified library/version -* `remove`: Removes the specified library/version -* `build`: Forces a build from source (= skip pre-built binary detection) -* `publish`: Uploads the built binaries to the S3 bucket -* `prefix`: Prints the absolute path to the library installation directory -* `version`: Prints the actual version of the library (only useful when version is `system`) -* `cflags`: Prints C/C++ compiler flags -* `ldflags`: Prints linker flags -* `link`: Creates symlinks for packages in `mason_packages/.link` -* `trigger`: Trigger a build and publish operation on Travis CI +* [install](#install) - installs the specified library/version +* [remove](#remove) - removes the specified library/version +* [build](#build) - forces a build from source (= skip pre-built binary detection) +* [publish](#publish) - uploads packages to the S3 bucket +* [prefix](#prefix) - prints the absolute path to the library installation directory +* [version](#version) - prints the actual version of the library (only useful when version is `system`) +* [cflags](#cflags) - prints C/C++ compiler flags +* [ldflags](#ldflags) - prints linker flags +* [link](#link) - creates symlinks for packages in `mason_packages/.link` +* [trigger](#trigger) - trigger a build and publish operation on Travis CI -### `install` +#### install ```bash $ mason install libuv 0.11.29 @@ -140,8 +160,7 @@ Next, Mason checks whether there are pre-built binaries available in the S3 buck If no pre-built binaries are available, Mason is going to build the library according to the script in the `mason_packages/.build` folder, and install into the platform- and library-specific directory. - -### `remove` +#### remove ```bash $ mason remove libuv 0.11.29 @@ -153,15 +172,15 @@ $ mason remove libuv 0.11.29 Removes the specified library/version from the package directory. 
-### `build` +#### build -This command works like the `install` command, except that it *doesn't* check for existing library installations, and that it *doesn't* check for pre-built binaries. I.e. it first removes the current installation and *always* builds the library from source. This is useful when you are working on a build script and want to fresh builds. +This command works like the `install` command, except that it *doesn't* check for existing library installations, and that it *doesn't* check for pre-built binaries, i.e. it first removes the current installation and *always* builds the library from source. This is useful when you are working on a build script and want to fresh builds. -### `publish` +#### publish Creates a gzipped tarball of the specified platform/library/version and uploads it to the `mason-binaries` S3 bucket. If you want to use this feature, you need write access to the bucket and need to specify the environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. -### `prefix` +#### prefix ```bash ~ $ mason prefix libuv 0.11.29 @@ -170,7 +189,16 @@ Creates a gzipped tarball of the specified platform/library/version and uploads This prints the absolute path to the installation directory of the the library/version. Likely, this folder has the typical `include` and `lib` folders. -### `cflags` +#### version + +```bash +~ $ mason version zlib system +1.2.11 +``` + +This prints the version of the library, which is only useful when version is `system`. See [System packages](#system-packages) for more details. + +#### cflags ```bash ~ $ mason cflags libuv 0.11.29 @@ -179,7 +207,7 @@ This prints the absolute path to the installation directory of the the library/v Prints the C/C++ compiler flags that are required to compile source code with this library. Likely, this is just the include path, but may also contain other flags. 
-### `ldflags` +#### ldflags ```bash ~ $ mason ldflags libuv 0.11.29 @@ -188,7 +216,7 @@ Prints the C/C++ compiler flags that are required to compile source code with th Prints the linker flags that are required to link against this library. -### `link` +#### link ```bash ~ $ mason link libuv 0.11.29 @@ -196,9 +224,9 @@ Prints the linker flags that are required to link against this library. This command only works if the package has already been installed. When run it symlinks the versioned `lib`, `include`, `share`, and `bin` folders of the package into a shared structure that is unversioned. For example if `mason prefix libuv 0.11.29` was `./mason_packages/osx-10.10/libuv/0.11.29` then the library would become available at `./mason_packages/.link/lib/libuv.a` -### `trigger` +#### trigger -In order to ensure that all prebuilt binaries are consistent and reproducible, we perform the final build and publish operation on Travis CI. +In order to ensure that all pre-built binaries are consistent and reproducible, we perform the final build and publish operation on Travis CI. First set the `MASON_TRAVIS_TOKEN` environment variable. You can do this either by installing the `travis` gem and running `travis token` or by using `curl` to hit the Travis api directly. See details on this below. **WARNING: be careful to keep this token safe. Cycling it requires emailing support@travis-ci.com. 
Giving someone an access token is like giving them full access to your Travis account.** @@ -219,11 +247,11 @@ Then set that in your environment and run: ```sh GITHUB_TOKEN= -curl -s -i https://api.travis-ci.org/auth/github \ +curl -s -i https://api.travis-ci.com/auth/github \ -H "User-Agent: Travis/1.0" \ -H "Content-Type: application/json" \ -H "Accept: application/vnd.travis-ci.2+json" \ - -H "Host: api.travis-ci.org" \ + -H "Host: api.travis-ci.com" \ -d "{\"github_token\": \"${GITHUB_TOKEN}\"}" ``` @@ -231,141 +259,253 @@ curl -s -i https://api.travis-ci.org/auth/github \ For details see https://docs.travis-ci.com/user/triggering-builds and https://github.com/travis-ci/travis.rb#readme -## Writing build scripts +## Creating a package -Each version of each package has its own directory in `scripts/${package}/${version}`, e.g. [`scripts/libpng/1.6.28`](https://github.com/mapbox/mason/tree/master/scripts/libpng/1.6.28). +### Prerequisites -Below are details on ways to create packages for: +Before getting started you should be able to answer the following questions. - - new packages (not previously unpackaged software) - - new versions of existing packages - - header-only libraries and pre-compiled libraries +**What are you packaging?** -### Creating new packages +There are different steps that you will need to follow depending on the type of library you are packaging. For example, there are fewer steps you need to take if you are creating a package of header-only code. Packaging compiled code has more steps because you'll need to tell Mason how to build your binaries. Another type of package is a [System package](#system-package) which has a unique process as well. -If you are creating a package for previously unpackaged software start by creating a new directory for your `${package}/${version}` from within your mason checkout. 
+**Are there previous versions already published?** -For example if you want to name your package `yourlib` and it is version `1.0.0` you would do: +Check the [list of packages](https://github.com/mapbox/mason/tree/master/scripts) to see if a previous version of your package exists. It's helpful to copy scripts from a previous version rather than creating code from scratch. -```bash -mkdir -p scripts/yourlib/1.0.0 -``` +**Where can Mason download your code?** -The directory must contain two files: +Every package needs to tell Mason where to download the code that it will build and publish, for example: - - `script.sh` - - `.travis.yml` + - `https://github.com/mapbox/geometry.hpp/archive/v0.9.2.tar.gz` for a Github release: [geometry 0.9.2](https://github.com/mapbox/geometry.hpp/releases/tag/v0.9.2) + - `https://github.com/mapbox/geometry.hpp/archive/b0e41cc5635ff8d50e7e1edb73cadf1d2a7ddc83.zip` for pre-release code hosted on Github: [geometry b0e41cc](https://github.com/mapbox/geometry.hpp/tree/b0e41cc5635ff8d50e7e1edb73cadf1d2a7ddc83) -What you put in those files depend on what type of package you are creating. See: +_Note: Your code doesn't need to be hosted on Github in order for Mason to work. Your code can be hosted anywhere. Another common location is [SourceForge](#https://sourceforge.net/)._ - - [Header-only package](#header-only-package) - - [Pre-compiled library package](#pre-compiled-library-package) +### Getting started -### Creating new versions of packages +These are just basic steps to help get you started. Depending on the complexity of building your code, you might have to review the [Mason scripts](#mason-scripts) section to get a better idea of how to further configure Mason to be able to create your package. -When creating a new package it is recommended to start by copying an existing package. +1. Create a new directory for your package. 
-Each package must contain two files: + From inside your `mason` checkout, create a directory named `${package}/${version}`. For example, if you have a library named `your-lib` that is version `0.1.0` you would: - - `script.sh` - - `.travis.yml` + ```bash + mkdir -p scripts/your-lib/0.1.0 + ``` -What you put in those files depend on what type of package you are creating. See: +2. Add scripts for building and publishing your package. - - [Header-only package](#header-only-package) - - [Pre-compiled library package](#pre-compiled-library-package) + Each package must have the following two files: `script.sh` and `.travis.yml`. Copy these two files from a previous version of your package. -#### Header-only package + If no previous version of your package exists, it is recommended to copy a simple package that has mostly boiler plate code: -For a header-only library, a good example to copy from is the `geometry` package. You can copying the `geometry` package: + - [geometry](https://github.com/mapbox/mason/tree/master/scripts/geometry/0.9.2) for header-only code + - [libpng](https://github.com/mapbox/mason/tree/master/scripts/libpng/1.6.32) for building and packaging binaries -```bash -cp -r scripts/geometry/0.9.1 scripts/yourlib/1.0.0 -``` + For example, if you're creating the first package of a library named `your-lib` that is version `0.1.0` you would copy scripts from the `geometry` package: -You will not need to edit the `.travis.yml`, but you will need to edit the `script.sh`. + ```bash + cp -r scripts/geometry/0.9.1 scripts/your-lib/0.1.0 + ``` -See the [Script structure](#script-structure) section below for details on the `script.sh` format. A `script.sh` is simplier for header-only libraries, so here is a shortlist of things you need to change: +3. Edit Mason variables in `script.sh`. 
-- `MASON_NAME`: change it from `geometry` to `yourlib` -- `MASON_VERSION`: change it from `0.9.1` to your version + You **must** set the follow Mason variables: -Then you will also need to update: + - `MASON_NAME`: set to the name of your package, e.g. `your-lib` + - `MASON_VERSION`: set to the package version, e.g. `0.1.0` + - `MASON_BUILD_PATH`: set to the location Mason will use to store header files or binaries before it packages and publishes them - - The github url on line `11` to match your download url - - The hash on line `12`. To do this, it is easiest to run `./mason build yourlib version` and it will fail, but the error message will contain the correct hash. Copy the hash and put it on line `12`. + You **may** also need to set the follow Mason variables: -Now update the `MASON_BUILD_PATH`. You will need to change the text of `geometry.hpp-` to your package name. This will likely work for your entire line: + - Other [Mason variables](#mason-variables) -``` -export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} -``` +4. Override Mason functions in `script.sh`. + + You **must** override the follow Mason functions: -Now make any adjustments needed to the `mason_compile` section, modifying the copying of headers. + - `mason_load_source`: you must call `mason_download` and update its parameters: + - url (first parameter): set to the location of your source code archive, e.g. `https://github.com/mapbox/your-lib/archive/v${MASON_VERSION}.tar.gz` + - checksum (second parameter): set to the checksum you get back after running a checksum function on the source code you want to download. 
The easiest way to get this checksum is to run `./mason build your-lib 0.1.0` (after setting the `mason_download` url) which will fail with an error message that will contain the correct checksum + - `mason_compile` + - for header-only see [geometry 0.9.2](https://github.com/mapbox/mason/blob/a7e35b0f632a8b2f0e338acc9dda0cff04d2f752/scripts/geometry/0.9.2/script.sh#L19) for an example + - for code that needs to be compiled see [zlib 1.2.8](https://github.com/mapbox/mason/blob/a7e35b0f632a8b2f0e338acc9dda0cff04d2f752/scripts/zlib/1.2.8/script.sh#L20) for an example -#### Pre-compiled library package + You **may** also need to override the following Mason functions: -For a compiled library package, start by copying the `libpng` package: + - Other [Mason functions](#mason-functions) + +### System packages + +Some packages ship with operating systems or can be easily installed with operating-specific package managers. For example, `libpng` is available on most systems and the version you're using doesn't really matter since it is mature and hasn't added any significant new APIs in recent years. + +The following `script.sh` contains the script code for packaging your system's `libpng`. _Note: To understand this code, make sure to review the [Mason scripts](#mason-scripts) section._ ```bash -cp -r scripts/libpng/1.6.28 scripts/yourlib/1.0.0 +#!/usr/bin/env bash + +MASON_NAME=libpng +MASON_VERSION=system +MASON_SYSTEM_PACKAGE=true + +. ${MASON_DIR}/mason.sh + +if [ ! $(pkg-config libpng --exists; echo $?) = 0 ]; then + mason_error "Cannot find libpng with pkg-config" + exit 1 +fi + +function mason_system_version { + mkdir -p "${MASON_PREFIX}" + cd "${MASON_PREFIX}" + if [ ! 
-f version ]; then + echo "#include +#include +#include +int main() { + assert(PNG_LIBPNG_VER == png_access_version_number()); + printf(\"%s\", PNG_LIBPNG_VER_STRING); + return 0; +} +" > version.c && ${CC:-cc} version.c $(mason_cflags) $(mason_ldflags) -o version + fi + ./version +} + +function mason_compile { + : +} + +function mason_cflags { + pkg-config libpng --cflags +} + +function mason_ldflags { + pkg-config libpng --libs +} + +mason_run "$@" ``` -See the [Script structure](#script-structure) section below for details on the `script.sh` format. +System packages are marked with `MASON_SYSTEM_PACKAGE=true`. We're also first using `pkg-config` to check whether the library is present at all. The `mason_system_version` function creates a small executable which outputs the actual version. It is the only thing that is cached in the installation directory. + +We have to override the `mason_cflags` and `mason_ldflags` commands since the regular commands return flags for static libraries, but in the case of system packages, we want to dynamically link against the package. + +## Releasing a package + +Here is an example workflow to help get you started: + +1. Create an annotated tag in git for the code you want to package. + + Annotated tags can be stored, checksummed, signed and verified with GNU Privacy Guard (GPG) in Github. To create an annotated tag specify `-a` when running the `tag` command, for example: + + `git tag -a v0.1.0 -m "version 0.1.0"` + +2. Share your new tag. + + You have to explicitly push your new tag to a shared Github server. This is the location we will share with Mason when specifying where to download the code to be packaged. Using our example above we would run: + + `git push origin v0.1.0` + + (Or you can push all tags: `git push --tags`.) + +3. Create a package. + + We recommend working in a new branch before creating a package. 
For example if you want to call your new package `my_new_package` version `0.1.0` you could create and checkout a branch like this: + + `git checkout -b my_new_package-0.1.0` + + Now follow the instructions in the [Getting started](#getting-started) section for creating a new package. + +4. Test your package. + + Even though we will eventually build the package using Travis, it's a good idea to build locally to check for errors. + + `./mason build my_new_package 0.1.0` + +5. Push changes to remote. + + Once you can build, push your changes up to Github remote so that Travis will know what to build and publish in the next step. + + `git push origin my_new_package-0.1.0` + +6. Build and Publish your package. + + Use Mason's `trigger` command to tell Travis to build, test, and publish your new package to the S3 bucket specified in `mason.sh`. + + `./mason trigger my_new_package 0.1.0` + +7. Check S3 to verify whether your package exists. + +## Using a package + +Mason has two clients for installing and working with packages: -#### Script structure +* **Mason cli** - comes bundled with the Mason project, see [Usage](#usage) for commands -This `script.sh` is structured like: + For example [hpp-skel](https://github.com/mapbox/hpp-skel) uses the Mason cli client and requires that the Mason version in [scripts/setup.sh](https://github.com/mapbox/hpp-skel/blob/044187fdfc441cf9db57a3c1b03972eee6882a9b/scripts/setup.sh#L6) be updated in order to stay up-to-date with the latest available packages. + +* **[mason-js](https://github.com/mapbox/mason-js)** - a separate Node.js client with its own installation and usage instructions + + For example [node-cpp-skel](https://github.com/mapbox/node-cpp-skel) uses the mason-js client and pulls packages directly from S3. 
+ +_Note: The install command syntax will differ depending on the client you use._ + +## Mason internals + +### Mason scripts + +The `script.sh` file in each package is structured like the following example: ```bash #!/usr/bin/env bash +# This is required for every package. MASON_NAME=libuv MASON_VERSION=0.11.29 -MASON_LIB_FILE=lib/libuv.a -MASON_PKGCONFIG_FILE=lib/pkgconfig/libuv.pc -``` -Declare these variables first. `MASON_NAME` and `MASON_VERSION` are mandatory. If the install script build a static library, specify the relative path in the installation directory in `MASON_LIB_FILE`. This is used to check whether an installation actually exists before proceeding to download/build the library anew. You can optionally specify `MASON_PKGCONFIG_FILE` as the relative path to the pig-config file if the library has one. If the library doesn't have one, you need to override the functions `mason_cflags` and `mason_ldflags` (see below). +# This is required if Mason will need to build a static library. Specify the relative path in the +# installation directory. MASON_LIB_FILE=lib/libuv.a -Then, we're loading the build system with +# You can specify the relative path to the pkg-config file if Mason needs to build your code before +# packaging. If the library doesn't have a pkg-config file, you will need to override the functions +# `mason_cflags` and `mason_ldflags`. MASON_PKGCONFIG_FILE=lib/pkgconfig/libuv.pc -```bash +# This is required when you need to load the build system to build your code before packaging. You +# don't need this line if you are packaging header-only code. . ${MASON_DIR}/mason.sh -``` - -Next, we're defining a function that obtains the source code and unzips it: -```bash +# Overriding this Mason function is required for all packages so Mason knows where to obtain your +# source code. This function also caches downloaded tarballs in the mason_packages/.cache folder. 
function mason_load_source { mason_download \ https://github.com/joyent/libuv/archive/v0.11.29.tar.gz \ 5bf49a8652f680557cbaf335a160187b2da3bf7f + # This unpacks the archive into the `mason_packages/.build` folder. If the tarball is BZip2 + # compressed, you can also use `mason_extract_tar_bz2` instead. mason_extract_tar_gz - export MASON_BUILD_PATH=${MASON_ROOT}/.build/libuv-${MASON_VERSION} + # This variable contains the path to the unpacked folder inside the `.build` directory. + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} } -``` - -In that function, you should use `mason_download` as a shortcut to download the tarball. The second argument to is a hash generated with `git hash-object` and used to verify that the source code downloaded matches the expected file. The function also caches downloaded tarballs in the `mason_packages/.cache` folder. You can get the hash object by running `./mason build ` locally and this should fail with a message of the expected hash. - -`mason_extract_tar_gz` unpacks the archive into the `mason_packages/.build` folder. If the tarball is BZip2 compressed, you can also use `mason_extract_tar_bz2` instead. -Lastly, the `MASON_BUILD_PATH` variable contains the path to the unpacked folder inside the `.build` directory. - -Then, you can optionally specify a function that is run before compiling, e.g. a script that generates configuration files: - -```bash +# Override this Mason function if you need to run code before compiling, e.g. a script that +# generates configuration files. function mason_prepare_compile { ./autogen.sh } -``` - -The heart of the script is the `mason_compile` function because it performs the actual build of the source code. There are a few variables available that you need to use to make sure that the package will work correctly. 
-```bash +# It is required to override the `mason_compile` function because it performs the actual build of +# the source code (or just copies header files into a package folder to be published later for +# header-only code). This is an example of how you would configure and make a static library. function mason_compile { + # You must set the build system's installation prefix to `MASON_PREFIX`. For cross-platform + # builds, you have to specify the `MASON_HOST_ARG`, which is empty for regular builds and is set + # to the correct host platform for cross-compiles e.g. iOS builds use `--host=arm-apple-darwin`. ./configure \ --prefix=${MASON_PREFIX} \ ${MASON_HOST_ARG} \ @@ -373,36 +513,29 @@ function mason_compile { --disable-shared \ --disable-dependency-tracking + # If the build system supports building concurrently, you can tell it to do so by providing the + # number of parallel tasks from `MASON_CONCURRENCY`. make install -j${MASON_CONCURRENCY} } -``` - -In particular, you have to set the build system's installation prefix to `MASON_PREFIX`. For cross-platform builds, you have to specify the `MASON_HOST_ARG`, which is empty for regular builds and is set to the correct host platform for cross-compiles (e.g. iOS builds use `--host=arm-apple-darwin`). - -If the build system supports building concurrently, you can tell it do do so by providing the number of parallel tasks from `MASON_CONCURRENCY`. - - -Next, the `mason_clean` function tells Mason how to clean up the build folder. This is required for multi-architecture builds. E.g. iOS builds perform a Simulator (Intel architecture) build first, then an iOS (ARM architecture) build. The results are `lipo`ed into one universal archive file. -```bash +# Tell Mason how to clean up the build folder. This is required for multi-architecture builds. E.g. +# iOS builds perform a Simulator (Intel architecture) build first, then an iOS (ARM architecture) +# build. The results are `lipo`ed into one universal archive file. 
function mason_clean { make clean } -``` - -Finally, we're going to run the everything: -```bash +# Run everything. mason_run "$@" ``` -### Variables +### Mason variables Name | Description ---|--- `MASON_DIR` | The directory where Mason itself is installed. Defaults to the current directory. `MASON_ROOT` | Absolute path the `mason_packages` directory. Example: `/Users/user/mason_packages`. -`MASON_PLATFORM` | Platform of the current invocation. Currently one of `osx`, `ios` or `linux`. +`MASON_PLATFORM` | Platform of the current invocation. Currently one of `osx`, `ios`, `android`, or `linux`. `MASON_PLATFORM_VERSION` | Version of the platform. It must include the architecture if the produced binaries are architecture-specific (e.g. on Linux). Example: `10.10` `MASON_NAME` | Name specified in the `script.sh` file. Example: `libuv` `MASON_VERSION` | Version specified in the `script.sh` file. Example: `0.11.29` @@ -419,71 +552,14 @@ Name | Description `MASON_XCODE_ROOT` | OS X specific; Path to the Xcode Developer directory. Example: `/Applications/Xcode.app/Contents/Developer` `MASON_HEADER_ONLY` | Set to `true` to specify this library as header-only, which bypasses building binaries (default `false`) -### Customization - -In addition to the override functions described above, you can also override the `mason_cflags` and `mason_ldflags` functions. By default, they're using `pkg-config` to determine these flags and print them to standard output. If a library doesn't include a `.pc` file, or has some other mechanism for determining the build flags, you can run them instead: - - -```bash -function mason_ldflags { - ${MASON_PREFIX}/bin/curl-config --static-libs` -} -``` - -### System packages - -Some packages ship with operating systems, or can be easily installed with operating-specific package managers. 
For example, `libpng` is available on most systems and the version you're using doesn't really matter since it is mature and hasn't added any significant new APIs in recent years. To create a system package for it, use this `script.sh` file: - - -```bash -#!/usr/bin/env bash - -MASON_NAME=libpng -MASON_VERSION=system -MASON_SYSTEM_PACKAGE=true - -. ${MASON_DIR}/mason.sh - -if [ ! $(pkg-config libpng --exists; echo $?) = 0 ]; then - mason_error "Cannot find libpng with pkg-config" - exit 1 -fi - -function mason_system_version { - mkdir -p "${MASON_PREFIX}" - cd "${MASON_PREFIX}" - if [ ! -f version ]; then - echo "#include -#include -#include -int main() { - assert(PNG_LIBPNG_VER == png_access_version_number()); - printf(\"%s\", PNG_LIBPNG_VER_STRING); - return 0; -} -" > version.c && ${CC:-cc} version.c $(mason_cflags) $(mason_ldflags) -o version - fi - ./version -} - -function mason_compile { - : -} +### Mason functions -function mason_cflags { - pkg-config libpng --cflags -} - -function mason_ldflags { - pkg-config libpng --libs -} +These are common Mason functions that you might need to override in your package's `script.sh` file depending on the type of library you are packaging. See https://github.com/mapbox/mason/blob/master/mason.sh to view how these functions are implemented by default. There you will find even more `mason_`-functions that you might find useful to override. -mason_run "$@" -``` - -System packages are marked with `MASON_SYSTEM_PACKAGE=true`. We're also first using `pkg-config` to check whether the library is present at all. The `mason_system_version` function creates a small executable which outputs the actual version. It is the only thing that is cached in the installation directory. - -We have to override the `mason_cflags` and `mason_ldflags` commands since the regular commands return flags for static libraries, but in the case of system packages, we want to dynamically link against the package. 
+ - `mason_pkgconfig` + - `mason_cflags` + - `mason_ldflags` + - `mason_static_libs` ## Troubleshooting diff --git a/mason b/mason index 103377139..20d182355 100755 --- a/mason +++ b/mason @@ -7,7 +7,7 @@ MASON_VERSION=$1 ; shift set -eu set -o pipefail -MASON_RELEASED_VERSION="0.18.0" +MASON_RELEASED_VERSION="0.23.0" if [ "${MASON_COMMAND}" = "--version" ]; then echo ${MASON_RELEASED_VERSION} @@ -23,14 +23,17 @@ if [ -z "${MASON_COMMAND}" ]; then exit 1 fi -ROOTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" - -if test -L "$ROOTDIR/mason"; then - ROOTDIR="$(dirname "$(readlink "$ROOTDIR/mason")")" +if [ -z "${MASON_DIR:+isset}" ]; then + MASON_DIR=$( + cd "$(dirname "${BASH_SOURCE}")" && + if [ -L mason ]; then + possibly_relative_path=$(readlink mason) && + cd "$(dirname "${possibly_relative_path}")" + fi && + pwd + ) fi -MASON_DIR=${MASON_DIR:-$ROOTDIR} - if [ "${MASON_COMMAND}" = "env" ]; then . ${MASON_DIR}/mason.sh > /dev/null eval echo \$${MASON_NAME} @@ -57,6 +60,8 @@ if [ "${MASON_COMMAND}" = "trigger" ]; then require 'yaml' require 'json' config = YAML.load_file("scripts/${MASON_NAME}/${MASON_VERSION}/.travis.yml") + config["merge_mode"] = "replace" + config["version"] = "~> 1.0" config["env"] ||= {} config["env"]["global"] ||= [] config["env"]["global"] << "MASON_NAME=${MASON_NAME}" << "MASON_VERSION=${MASON_VERSION}" @@ -72,14 +77,14 @@ RUBY } }" - curl -s -X POST https://api.travis-ci.org/repo/mapbox%2Fmason/requests \ + curl -s -X POST https://api.travis-ci.com/repo/mapbox%2Fmason/requests \ -H "Content-Type: application/json" \ -H "Accept: application/json" \ -H "Travis-API-Version: 3" \ -H "Authorization: token ${MASON_TRAVIS_TOKEN}" \ -d "$body" - echo "Now go to https://travis-ci.org/mapbox/mason/builds to view build status" + echo "Now go to https://travis-ci.com/mapbox/mason/builds to view build status" exit fi diff --git a/mason.sh b/mason.sh index 1aebc57e7..d2205aeca 100644 --- a/mason.sh +++ b/mason.sh @@ -14,8 +14,8 @@ 
elif [ "${MASON_UNAME}" = 'Linux' ]; then MASON_PLATFORM=${MASON_PLATFORM:-linux} fi -# In non-interactive environments like Travis CI, we can't use -s because it'll fill up the log -# way too fast +# In non-interactive environments like Travis CI, we use -s (--silent) +# because otherwise it will quickly fill up the log case $- in *i*) MASON_CURL_ARGS= ;; # interactive *) MASON_CURL_ARGS=-s ;; # non-interative @@ -23,14 +23,14 @@ esac case ${MASON_UNAME} in 'Darwin') MASON_CONCURRENCY=$(sysctl -n hw.ncpu) ;; - 'Linux') MASON_CONCURRENCY=$(lscpu -p | egrep -v '^#' | wc -l) ;; + 'Linux') MASON_CONCURRENCY=$(grep -c ^processor /proc/cpuinfo) ;; *) MASON_CONCURRENCY=1 ;; esac -function mason_step { >&2 echo -e "\033[1m\033[36m* $1\033[0m"; } -function mason_substep { >&2 echo -e "\033[1m\033[36m* $1\033[0m"; } -function mason_success { >&2 echo -e "\033[1m\033[32m* $1\033[0m"; } -function mason_error { >&2 echo -e "\033[1m\033[31m$1\033[0m"; } +function mason_step { >&2 printf "\033[1;36m%s\033[0m\n" "* $*"; } +function mason_substep { >&2 printf "\033[0;36m%s\033[0m\n" "* $*"; } +function mason_success { >&2 printf "\033[1;32m%s\033[0m\n" "* $*"; } +function mason_error { >&2 printf "\033[1;31m%s\033[0m\n" "$*"; } case ${MASON_ROOT} in @@ -262,6 +262,7 @@ fi MASON_PREFIX=${MASON_ROOT}/${MASON_PLATFORM_ID}/${MASON_NAME}/${MASON_VERSION} MASON_BINARIES=${MASON_PLATFORM_ID}/${MASON_NAME}/${MASON_VERSION}.tar.gz MASON_BINARIES_PATH=${MASON_ROOT}/.binaries/${MASON_BINARIES} +MASON_BINARIES_URL="https://${MASON_BUCKET}.s3.amazonaws.com/${MASON_BINARIES}" @@ -275,7 +276,9 @@ function mason_check_existing { fi elif [ "${MASON_SYSTEM_PACKAGE:-false}" = true ]; then if [ -f "${MASON_PREFIX}/version" ] ; then - mason_success "Using system-provided ${MASON_NAME} $(set -e;mason_system_version)" + local version # no assignment, local ignores exit status from $(...) 
+ version=$(set -eu; mason_system_version) + mason_success "Using system-provided ${MASON_NAME} ${version}" exit 0 fi else @@ -306,20 +309,33 @@ function mason_check_installed { function mason_clear_existing { - if [ -d "${MASON_PREFIX}" ]; then + if [ -e "${MASON_PREFIX}" ] || [ -h "${MASON_PREFIX}" ]; then mason_step "Removing existing package... ${MASON_PREFIX}" rm -rf "${MASON_PREFIX}" fi } +function mason_curl { + curl -f -L ${MASON_CURL_ARGS} "$@" + # + # -f, --fail + # (HTTP) Fail silently (no output at all) on server errors. + # + # -L, --location + # (HTTP) If the server reports that the requested page has + # moved to a different location, this option will make curl + # redo the request on the new place. +} + + function mason_download { mkdir -p "${MASON_ROOT}/.cache" cd "${MASON_ROOT}/.cache" if [ ! -f "${MASON_SLUG}" ] ; then mason_step "Downloading $1..." local CURL_RESULT=0 - curl --retry 3 ${MASON_CURL_ARGS} -f -S -L "$1" -o "${MASON_SLUG}" || CURL_RESULT=$? + mason_curl -S --retry 3 "$1" -o "${MASON_SLUG}" || CURL_RESULT=$? if [[ ${CURL_RESULT} != 0 ]]; then mason_error "Failed to download ${1} (returncode: $CURL_RESULT)" exit $CURL_RESULT @@ -368,24 +384,34 @@ function mason_clean { } function bash_lndir() { - oldifs=$IFS - IFS=' - ' - src=$(cd "$1" ; pwd) - dst=$(cd "$2" ; pwd) - find "$src" -type d | - while read -r dir; do - mkdir -p "$dst${dir#$src}" - done - - find "$src" -type f -o -type l | - while read -r src_f; do - dst_f="$dst${src_f#$src}" - if [[ ! -e $dst_f ]]; then - ln -s "$src_f" "$dst_f" - fi - done - IFS=$oldifs + local src dst + src=$(cd -- "$1" && pwd) + dst=$(cd -- "$2" && pwd) + + find "$src" -mindepth 1 -type d -exec \ + /usr/bin/env src="$src" dst="$dst" bash -c \ + ' + mkdir -p -- "${@/#"$src"/$dst}" + for src_dir + do + dst_dir="${src_dir/#"$src"/$dst}" + export dst_dir + + # OSX ln does not support -t (--target-directory), + # that is why -exec sh -c "ln -sf -- files... 
dir" + # is used instead of -exec ln -sft dir files... + + find "$src_dir" -maxdepth 1 \ + \( -type f -o -type l \) \ + \! -name "*~" \ + -exec /bin/sh -c \ + " + ln -sf -- \"\$@\" \"\$dst_dir/\" + + " sh_lnfiles "{""}" + + done + + ' bash_lndir '{}' + } @@ -394,11 +420,17 @@ function run_lndir() { #/bin/cp -R -n ${MASON_PREFIX}/* ${TARGET_SUBDIR} mason_step "Linking ${MASON_PREFIX}" mason_step "Links will be inside ${TARGET_SUBDIR}" - if hash lndir 2>/dev/null; then - mason_substep "Using $(which lndir) for symlinking" - lndir -silent "${MASON_PREFIX}/" "${TARGET_SUBDIR}" 2>/dev/null + local cp_help=$(cp --help 2>/dev/null) + if [[ $cp_help =~ [[:space:]]--symbolic-link[[:space:]] && + $cp_help =~ [[:space:]]--target-directory= ]] + then + mason_substep "Using 'cp' for symlinking" + find "${MASON_PREFIX}" -mindepth 1 -type d -prune -exec \ + cp -RPfp --symbolic-link \ + --target-directory="${TARGET_SUBDIR}" \ + -- '{}' + else - mason_substep "Using bash fallback for symlinking (install lndir for faster symlinking)" + mason_substep "Using bash fallback for symlinking (GNU cp needed for faster symlinking)" bash_lndir "${MASON_PREFIX}/" "${TARGET_SUBDIR}" fi mason_step "Done linking ${MASON_PREFIX}" @@ -505,6 +537,7 @@ function mason_config { echo "platform=${MASON_PLATFORM}" echo "platform_version=${MASON_PLATFORM_VERSION}" fi + echo "root=${MASON_ROOT}" mason_config_custom for name in include_dirs definitions options ldflags static_libs ; do eval value=\$MASON_CONFIG_$(echo ${name} | tr '[:lower:]' '[:upper:]') @@ -526,19 +559,18 @@ function mason_write_config { function mason_try_binary { MASON_BINARIES_DIR=$(dirname "${MASON_BINARIES}") mkdir -p "${MASON_ROOT}/.binaries/${MASON_BINARIES_DIR}" - local FULL_URL="https://${MASON_BUCKET}.s3.amazonaws.com/${MASON_BINARIES}" # try downloading from S3 if [ ! 
-f "${MASON_BINARIES_PATH}" ] ; then - mason_step "Downloading binary package ${FULL_URL}" + mason_step "Downloading binary package ${MASON_BINARIES_URL}" local CURL_RESULT=0 local HTTP_RETURN=0 - HTTP_RETURN=$(curl -w "%{http_code}" --retry 3 ${MASON_CURL_ARGS} -f -L ${FULL_URL} -o "${MASON_BINARIES_PATH}.tmp") || CURL_RESULT=$? + HTTP_RETURN=$(mason_curl -w "%{http_code}" --retry 3 "${MASON_BINARIES_URL}" -o "${MASON_BINARIES_PATH}.tmp") || CURL_RESULT=$? if [[ ${CURL_RESULT} != 0 ]]; then if [[ ${HTTP_RETURN} == "403" ]]; then - mason_step "Binary not available for ${FULL_URL}" + mason_step "Binary not available for ${MASON_BINARIES_URL}" else - mason_error "Failed to download ${FULL_URL} (returncode: ${CURL_RESULT})" + mason_error "Failed to download ${MASON_BINARIES_URL} (returncode: ${CURL_RESULT})" exit $CURL_RESULT fi else @@ -548,7 +580,7 @@ function mason_try_binary { mason_step "Updating binary package ${MASON_BINARIES}..." local CURL_RESULT=0 local HTTP_RETURN=0 - HTTP_RETURN=$(curl -w "%{http_code}" --retry 3 ${MASON_CURL_ARGS} -f -L -z "${MASON_BINARIES_PATH}" -o "${MASON_BINARIES_PATH}.tmp") || CURL_RESULT=$? + HTTP_RETURN=$(mason_curl -w "%{http_code}" --retry 3 -z "${MASON_BINARIES_PATH}" "${MASON_BINARIES_URL}" -o "${MASON_BINARIES_PATH}.tmp") || CURL_RESULT=$? if [[ ${CURL_RESULT} != 0 ]]; then if [ -f "${MASON_BINARIES_PATH}.tmp" ]; then mv "${MASON_BINARIES_PATH}.tmp" "${MASON_BINARIES_PATH}" @@ -569,15 +601,27 @@ function mason_try_binary { # to the current user using fakeroot if available $(which fakeroot) tar xzf "${MASON_BINARIES_PATH}" - if [ ! 
-z "${MASON_PKGCONFIG_FILE:-}" ] ; then - if [ -f "${MASON_PREFIX}/${MASON_PKGCONFIG_FILE}" ] ; then - # Change the prefix - MASON_ESCAPED_PREFIX=$(echo "${MASON_PREFIX}" | sed -e 's/[\/&]/\\&/g') - sed -i.bak "s/prefix=.*/prefix=${MASON_ESCAPED_PREFIX}/" \ - "${MASON_PREFIX}/${MASON_PKGCONFIG_FILE}" - fi + local bdist_root= match replace + + if [ -f "${MASON_PREFIX}/mason.ini" ]; then + bdist_root=$(sed -ne 's|^root=||p' "${MASON_PREFIX}/mason.ini") fi + # fallback for packages without root=/path/to/mason_packages in mason.ini + case ${#bdist_root},${MASON_PLATFORM} in + 0,ios| \ + 0,osx) bdist_root="/Users/travis/build/mapbox/mason/mason_packages" ;; + 0,* ) bdist_root="/home/travis/build/mapbox/mason/mason_packages" ;; + esac + + match=$(sed 's:[][$*.^\\&]:\\&:g' <<< ${bdist_root}) + replace=$(sed 's:[\\&]:\\&:g' <<< ${MASON_ROOT}) + + # fixup libtool .la and pkgconfig .pc files + find "${MASON_PREFIX}" -name include -prune -o -type f \ + \( -name '*.la' -o -path '*/pkgconfig/*.pc' \) \ + -exec sed -i.bak "s&${match}&${replace}&g" '{}' + + mason_success "Installed binary package at ${MASON_PREFIX}" exit 0 fi @@ -695,15 +739,15 @@ function mason_publish { MD5="$(openssl md5 -binary < "${MASON_BINARIES_PATH}" | base64)" SIGNATURE="$(printf "PUT\n$MD5\n$CONTENT_TYPE\n$DATE\nx-amz-acl:public-read\n/${MASON_BUCKET}/${MASON_BINARIES}" | openssl sha1 -binary -hmac "$AWS_SECRET_ACCESS_KEY" | base64)" - curl -S -T "${MASON_BINARIES_PATH}" "https://${MASON_BUCKET}.s3.amazonaws.com/${MASON_BINARIES}" \ + curl -S -T "${MASON_BINARIES_PATH}" "${MASON_BINARIES_URL}" \ -H "Date: $DATE" \ -H "Authorization: AWS $AWS_ACCESS_KEY_ID:$SIGNATURE" \ -H "Content-Type: $CONTENT_TYPE" \ -H "Content-MD5: $MD5" \ -H "x-amz-acl: public-read" - echo "https://${MASON_BUCKET}.s3.amazonaws.com/${MASON_BINARIES}" - curl -f -I "https://${MASON_BUCKET}.s3.amazonaws.com/${MASON_BINARIES}" + echo "${MASON_BINARIES_URL}" + curl -f -I "${MASON_BINARIES_URL}" } function mason_run { @@ -713,7 +757,9 @@ 
function mason_run { mason_clear_existing mason_build mason_write_config - mason_success "Installed system-provided ${MASON_NAME} $(set -e;mason_system_version)" + local version # no assignment, local ignores exit status from $(...) + version=$(set -eu; mason_system_version) + mason_success "Installed system-provided ${MASON_NAME} ${version}" else mason_check_existing mason_clear_existing diff --git a/scripts/afl/2.19b/script.sh b/scripts/afl/2.19b/script.sh index 70da9db5a..14f1a31b5 100644 --- a/scripts/afl/2.19b/script.sh +++ b/scripts/afl/2.19b/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/afl-fuzz function mason_load_source { mason_download \ - http://lcamtuf.coredump.cx/afl/releases/afl-2.19b.tgz \ + https://lcamtuf.coredump.cx/afl/releases/afl-2.19b.tgz \ 6627c7b7c873e26fb7fbb6fd574c93676442d8b2 mason_extract_tar_gz diff --git a/scripts/android-ndk/arm-9-r10e/script.sh b/scripts/android-ndk/arm-9-r10e/script.sh index a80346975..2e11f49b5 100755 --- a/scripts/android-ndk/arm-9-r10e/script.sh +++ b/scripts/android-ndk/arm-9-r10e/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/ndk/android-ndk-r10e-darwin-x86_64.bin \ + https://dl.google.com/android/ndk/android-ndk-r10e-darwin-x86_64.bin \ dea2dd3939eea3289cab075804abb153014b78d3 elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/ndk/android-ndk-r10e-linux-x86_64.bin \ + https://dl.google.com/android/ndk/android-ndk-r10e-linux-x86_64.bin \ 285606ba6882d27d99ed469fc5533cc3c93967f5 fi diff --git a/scripts/android-ndk/arm-9-r11c/script.sh b/scripts/android-ndk/arm-9-r11c/script.sh index 48bbe1237..6348e4972 100755 --- a/scripts/android-ndk/arm-9-r11c/script.sh +++ b/scripts/android-ndk/arm-9-r11c/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - 
http://dl.google.com/android/repository/android-ndk-r11c-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r11c-darwin-x86_64.zip \ 0c6fa2017dd5237f6270887c85feedc4aafb3aef elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r11c-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r11c-linux-x86_64.zip \ 0c646e2fceb3ef853e1832f4aa3a0dc4c16d2229 fi diff --git a/scripts/android-ndk/arm-9-r12b/script.sh b/scripts/android-ndk/arm-9-r12b/script.sh index 72caaa2ab..dcce4c031 100755 --- a/scripts/android-ndk/arm-9-r12b/script.sh +++ b/scripts/android-ndk/arm-9-r12b/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r12b-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r12b-darwin-x86_64.zip \ 1a3bbdde35a240086b022cdf13ddcf40c27caa6e elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r12b-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r12b-linux-x86_64.zip \ c6286e131c233c25537a306eae0a29d50b352b91 fi diff --git a/scripts/android-ndk/arm-9-r13b/script.sh b/scripts/android-ndk/arm-9-r13b/script.sh index 359f04a39..0ef6c7320 100755 --- a/scripts/android-ndk/arm-9-r13b/script.sh +++ b/scripts/android-ndk/arm-9-r13b/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r13b-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r13b-darwin-x86_64.zip \ b822dd239f63cd2e1e72c823c41bd732da2e5ad6 elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip \ + 
https://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip \ b95dd1fba5096ca3310a67e90b2a5a8aca3ddec7 fi diff --git a/scripts/android-ndk/arm64-21-r10e-gcc/script.sh b/scripts/android-ndk/arm64-21-r10e-gcc/script.sh index 2b58a00a5..736d2eff0 100755 --- a/scripts/android-ndk/arm64-21-r10e-gcc/script.sh +++ b/scripts/android-ndk/arm64-21-r10e-gcc/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/ndk/android-ndk-r10e-darwin-x86_64.bin \ + https://dl.google.com/android/ndk/android-ndk-r10e-darwin-x86_64.bin \ dea2dd3939eea3289cab075804abb153014b78d3 elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/ndk/android-ndk-r10e-linux-x86_64.bin \ + https://dl.google.com/android/ndk/android-ndk-r10e-linux-x86_64.bin \ 285606ba6882d27d99ed469fc5533cc3c93967f5 fi diff --git a/scripts/android-ndk/arm64-21-r10e/script.sh b/scripts/android-ndk/arm64-21-r10e/script.sh index 94ac8eecd..ef001354c 100755 --- a/scripts/android-ndk/arm64-21-r10e/script.sh +++ b/scripts/android-ndk/arm64-21-r10e/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/ndk/android-ndk-r10e-darwin-x86_64.bin \ + https://dl.google.com/android/ndk/android-ndk-r10e-darwin-x86_64.bin \ dea2dd3939eea3289cab075804abb153014b78d3 elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/ndk/android-ndk-r10e-linux-x86_64.bin \ + https://dl.google.com/android/ndk/android-ndk-r10e-linux-x86_64.bin \ 285606ba6882d27d99ed469fc5533cc3c93967f5 fi diff --git a/scripts/android-ndk/arm64-21-r11c/script.sh b/scripts/android-ndk/arm64-21-r11c/script.sh index 1f145e728..df56159d8 100755 --- a/scripts/android-ndk/arm64-21-r11c/script.sh +++ b/scripts/android-ndk/arm64-21-r11c/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function 
mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r11c-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r11c-darwin-x86_64.zip \ 0c6fa2017dd5237f6270887c85feedc4aafb3aef elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r11c-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r11c-linux-x86_64.zip \ 0c646e2fceb3ef853e1832f4aa3a0dc4c16d2229 fi diff --git a/scripts/android-ndk/arm64-21-r12b/script.sh b/scripts/android-ndk/arm64-21-r12b/script.sh index bfba85293..ea384bad1 100755 --- a/scripts/android-ndk/arm64-21-r12b/script.sh +++ b/scripts/android-ndk/arm64-21-r12b/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r12b-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r12b-darwin-x86_64.zip \ 1a3bbdde35a240086b022cdf13ddcf40c27caa6e elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r12b-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r12b-linux-x86_64.zip \ c6286e131c233c25537a306eae0a29d50b352b91 fi diff --git a/scripts/android-ndk/arm64-21-r13b/script.sh b/scripts/android-ndk/arm64-21-r13b/script.sh index d075c8af4..5c680cdf5 100755 --- a/scripts/android-ndk/arm64-21-r13b/script.sh +++ b/scripts/android-ndk/arm64-21-r13b/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r13b-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r13b-darwin-x86_64.zip \ b822dd239f63cd2e1e72c823c41bd732da2e5ad6 elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - 
http://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip \ b95dd1fba5096ca3310a67e90b2a5a8aca3ddec7 fi diff --git a/scripts/android-ndk/mips-9-r10e/script.sh b/scripts/android-ndk/mips-9-r10e/script.sh index 1bf5c5aec..6e8b99b3b 100755 --- a/scripts/android-ndk/mips-9-r10e/script.sh +++ b/scripts/android-ndk/mips-9-r10e/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/ndk/android-ndk-r10e-darwin-x86_64.bin \ + https://dl.google.com/android/ndk/android-ndk-r10e-darwin-x86_64.bin \ dea2dd3939eea3289cab075804abb153014b78d3 elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/ndk/android-ndk-r10e-linux-x86_64.bin \ + https://dl.google.com/android/ndk/android-ndk-r10e-linux-x86_64.bin \ 285606ba6882d27d99ed469fc5533cc3c93967f5 fi diff --git a/scripts/android-ndk/mips-9-r11c/script.sh b/scripts/android-ndk/mips-9-r11c/script.sh index 58c99a765..123322d58 100755 --- a/scripts/android-ndk/mips-9-r11c/script.sh +++ b/scripts/android-ndk/mips-9-r11c/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r11c-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r11c-darwin-x86_64.zip \ 0c6fa2017dd5237f6270887c85feedc4aafb3aef elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r11c-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r11c-linux-x86_64.zip \ 0c646e2fceb3ef853e1832f4aa3a0dc4c16d2229 fi diff --git a/scripts/android-ndk/mips-9-r12b/script.sh b/scripts/android-ndk/mips-9-r12b/script.sh index d80d9ed12..73893a6a9 100755 --- a/scripts/android-ndk/mips-9-r12b/script.sh +++ 
b/scripts/android-ndk/mips-9-r12b/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r12b-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r12b-darwin-x86_64.zip \ 1a3bbdde35a240086b022cdf13ddcf40c27caa6e elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r12b-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r12b-linux-x86_64.zip \ c6286e131c233c25537a306eae0a29d50b352b91 fi diff --git a/scripts/android-ndk/mips64-21-r10e/script.sh b/scripts/android-ndk/mips64-21-r10e/script.sh index 92f313cdb..204214b20 100755 --- a/scripts/android-ndk/mips64-21-r10e/script.sh +++ b/scripts/android-ndk/mips64-21-r10e/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/ndk/android-ndk-r10e-darwin-x86_64.bin \ + https://dl.google.com/android/ndk/android-ndk-r10e-darwin-x86_64.bin \ dea2dd3939eea3289cab075804abb153014b78d3 elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/ndk/android-ndk-r10e-linux-x86_64.bin \ + https://dl.google.com/android/ndk/android-ndk-r10e-linux-x86_64.bin \ 285606ba6882d27d99ed469fc5533cc3c93967f5 fi diff --git a/scripts/android-ndk/mips64-21-r11c/script.sh b/scripts/android-ndk/mips64-21-r11c/script.sh index 229fcea0c..fd50778df 100755 --- a/scripts/android-ndk/mips64-21-r11c/script.sh +++ b/scripts/android-ndk/mips64-21-r11c/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r11c-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r11c-darwin-x86_64.zip \ 0c6fa2017dd5237f6270887c85feedc4aafb3aef elif [ 
${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r11c-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r11c-linux-x86_64.zip \ 0c646e2fceb3ef853e1832f4aa3a0dc4c16d2229 fi diff --git a/scripts/android-ndk/mips64-21-r12b/script.sh b/scripts/android-ndk/mips64-21-r12b/script.sh index 32c15e0a2..c91436947 100755 --- a/scripts/android-ndk/mips64-21-r12b/script.sh +++ b/scripts/android-ndk/mips64-21-r12b/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r12b-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r12b-darwin-x86_64.zip \ 1a3bbdde35a240086b022cdf13ddcf40c27caa6e elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r12b-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r12b-linux-x86_64.zip \ c6286e131c233c25537a306eae0a29d50b352b91 fi diff --git a/scripts/android-ndk/mips64-21-r13b/script.sh b/scripts/android-ndk/mips64-21-r13b/script.sh index 3ed7341c9..838aebbbb 100755 --- a/scripts/android-ndk/mips64-21-r13b/script.sh +++ b/scripts/android-ndk/mips64-21-r13b/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r13b-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r13b-darwin-x86_64.zip \ b822dd239f63cd2e1e72c823c41bd732da2e5ad6 elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip \ b95dd1fba5096ca3310a67e90b2a5a8aca3ddec7 fi diff --git a/scripts/android-ndk/script-r13b.sh b/scripts/android-ndk/script-r13b.sh index 
d831f5a4d..df188a0b8 100644 --- a/scripts/android-ndk/script-r13b.sh +++ b/scripts/android-ndk/script-r13b.sh @@ -7,11 +7,11 @@ export MASON_ANDROID_NDK_API_LEVEL=${MASON_ANDROID_PLATFORM_VERSION##*-} function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-${MASON_ANDROID_NDK_VERSION}-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-${MASON_ANDROID_NDK_VERSION}-darwin-x86_64.zip \ b822dd239f63cd2e1e72c823c41bd732da2e5ad6 elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-${MASON_ANDROID_NDK_VERSION}-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-${MASON_ANDROID_NDK_VERSION}-linux-x86_64.zip \ b95dd1fba5096ca3310a67e90b2a5a8aca3ddec7 fi diff --git a/scripts/android-ndk/script-r14.sh b/scripts/android-ndk/script-r14.sh index b2f0eb7e4..149242092 100644 --- a/scripts/android-ndk/script-r14.sh +++ b/scripts/android-ndk/script-r14.sh @@ -7,11 +7,11 @@ export MASON_ANDROID_NDK_API_LEVEL=${MASON_ANDROID_PLATFORM_VERSION##*-} function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-${MASON_ANDROID_NDK_VERSION}-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-${MASON_ANDROID_NDK_VERSION}-darwin-x86_64.zip \ 8ace11815fb839a94ee5601c523ddb629f46232b elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-${MASON_ANDROID_NDK_VERSION}-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-${MASON_ANDROID_NDK_VERSION}-linux-x86_64.zip \ a8eca41e6a8d6f8776834261d3d0363b9a1abe19 fi diff --git a/scripts/android-ndk/script-r16b.sh b/scripts/android-ndk/script-r16b.sh index cc31bbd3e..03927e430 100644 --- a/scripts/android-ndk/script-r16b.sh +++ b/scripts/android-ndk/script-r16b.sh @@ -7,11 +7,11 @@ export 
MASON_ANDROID_NDK_API_LEVEL=${MASON_ANDROID_PLATFORM_VERSION##*-} function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-${MASON_ANDROID_NDK_VERSION}-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-${MASON_ANDROID_NDK_VERSION}-darwin-x86_64.zip \ 63464a54737c506ba86a7bb534a927b66c425f2c elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-${MASON_ANDROID_NDK_VERSION}-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-${MASON_ANDROID_NDK_VERSION}-linux-x86_64.zip \ 73ac466861f1a8035ad0abe608be2219a212540f fi diff --git a/scripts/android-ndk/x86-9-r10e/script.sh b/scripts/android-ndk/x86-9-r10e/script.sh index c82f4de87..e34a8486a 100755 --- a/scripts/android-ndk/x86-9-r10e/script.sh +++ b/scripts/android-ndk/x86-9-r10e/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/ndk/android-ndk-r10e-darwin-x86_64.bin \ + https://dl.google.com/android/ndk/android-ndk-r10e-darwin-x86_64.bin \ dea2dd3939eea3289cab075804abb153014b78d3 elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/ndk/android-ndk-r10e-linux-x86_64.bin \ + https://dl.google.com/android/ndk/android-ndk-r10e-linux-x86_64.bin \ 285606ba6882d27d99ed469fc5533cc3c93967f5 fi diff --git a/scripts/android-ndk/x86-9-r11c/script.sh b/scripts/android-ndk/x86-9-r11c/script.sh index f85b086f0..c8f25f325 100755 --- a/scripts/android-ndk/x86-9-r11c/script.sh +++ b/scripts/android-ndk/x86-9-r11c/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r11c-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r11c-darwin-x86_64.zip \ 
0c6fa2017dd5237f6270887c85feedc4aafb3aef elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r11c-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r11c-linux-x86_64.zip \ 0c646e2fceb3ef853e1832f4aa3a0dc4c16d2229 fi diff --git a/scripts/android-ndk/x86-9-r12b/script.sh b/scripts/android-ndk/x86-9-r12b/script.sh index 4bb762bbd..9fa45ae75 100755 --- a/scripts/android-ndk/x86-9-r12b/script.sh +++ b/scripts/android-ndk/x86-9-r12b/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r12b-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r12b-darwin-x86_64.zip \ 1a3bbdde35a240086b022cdf13ddcf40c27caa6e elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r12b-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r12b-linux-x86_64.zip \ c6286e131c233c25537a306eae0a29d50b352b91 fi diff --git a/scripts/android-ndk/x86_64-21-r10e/script.sh b/scripts/android-ndk/x86_64-21-r10e/script.sh index 353cb7ab7..bf72bb7d6 100755 --- a/scripts/android-ndk/x86_64-21-r10e/script.sh +++ b/scripts/android-ndk/x86_64-21-r10e/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/ndk/android-ndk-r10e-darwin-x86_64.bin \ + https://dl.google.com/android/ndk/android-ndk-r10e-darwin-x86_64.bin \ dea2dd3939eea3289cab075804abb153014b78d3 elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/ndk/android-ndk-r10e-linux-x86_64.bin \ + https://dl.google.com/android/ndk/android-ndk-r10e-linux-x86_64.bin \ 285606ba6882d27d99ed469fc5533cc3c93967f5 fi diff --git a/scripts/android-ndk/x86_64-21-r11c/script.sh 
b/scripts/android-ndk/x86_64-21-r11c/script.sh index 1ad62bfe6..93b1e7669 100755 --- a/scripts/android-ndk/x86_64-21-r11c/script.sh +++ b/scripts/android-ndk/x86_64-21-r11c/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r11c-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r11c-darwin-x86_64.zip \ 0c6fa2017dd5237f6270887c85feedc4aafb3aef elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r11c-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r11c-linux-x86_64.zip \ 0c646e2fceb3ef853e1832f4aa3a0dc4c16d2229 fi diff --git a/scripts/android-ndk/x86_64-21-r12b/script.sh b/scripts/android-ndk/x86_64-21-r12b/script.sh index 1e3785607..4df2ea62f 100755 --- a/scripts/android-ndk/x86_64-21-r12b/script.sh +++ b/scripts/android-ndk/x86_64-21-r12b/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r12b-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r12b-darwin-x86_64.zip \ 1a3bbdde35a240086b022cdf13ddcf40c27caa6e elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r12b-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r12b-linux-x86_64.zip \ c6286e131c233c25537a306eae0a29d50b352b91 fi diff --git a/scripts/android-ndk/x86_64-21-r13b/script.sh b/scripts/android-ndk/x86_64-21-r13b/script.sh index d589e3511..b0c7a2cb1 100755 --- a/scripts/android-ndk/x86_64-21-r13b/script.sh +++ b/scripts/android-ndk/x86_64-21-r13b/script.sh @@ -9,11 +9,11 @@ MASON_LIB_FILE= function mason_load_source { if [ ${MASON_PLATFORM} = 'osx' ]; then mason_download \ - 
http://dl.google.com/android/repository/android-ndk-r13b-darwin-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r13b-darwin-x86_64.zip \ b822dd239f63cd2e1e72c823c41bd732da2e5ad6 elif [ ${MASON_PLATFORM} = 'linux' ]; then mason_download \ - http://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip \ + https://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip \ b95dd1fba5096ca3310a67e90b2a5a8aca3ddec7 fi diff --git a/scripts/apitrace/2018-05-16-7fadfba/.travis.yml b/scripts/apitrace/2018-05-16-7fadfba/.travis.yml new file mode 100644 index 000000000..156e282ba --- /dev/null +++ b/scripts/apitrace/2018-05-16-7fadfba/.travis.yml @@ -0,0 +1,18 @@ +language: generic + +matrix: + include: + - os: linux + dist: trusty + sudo: false + env: CXX=g++-5 CC=gcc-5 + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - g++-5 + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/apitrace/2018-05-16-7fadfba/script.sh b/scripts/apitrace/2018-05-16-7fadfba/script.sh new file mode 100755 index 000000000..afef2ee8e --- /dev/null +++ b/scripts/apitrace/2018-05-16-7fadfba/script.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +MASON_NAME=apitrace +MASON_VERSION=2018-05-16-7fadfba +GITSHA=7fadfba5cbeada0e198b0ab8f83d88db43b66790 +MASON_LIB_FILE=bin/apitrace + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/apitrace/apitrace/archive/${GITSHA}.tar.gz \ + 62f9850e382362da90b86195ea95b893519e084f + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${GITSHA} +} + +function mason_compile { + cmake -H. 
-Bbuild -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_GUI=FALSE \ + -DCMAKE_INSTALL_PREFIX="${MASON_PREFIX}" + make -C build + make -C build install +} + +function mason_ldflags { + : +} + +function mason_cflags { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/bcc/e6c7568/README.md b/scripts/bcc/e6c7568/README.md index de30cdd0b..c74359fe4 100644 --- a/scripts/bcc/e6c7568/README.md +++ b/scripts/bcc/e6c7568/README.md @@ -6,7 +6,7 @@ Learn more about this tool at https://github.com/iovisor/bcc - Ubuntu >= precise - Centos >= 7 - - Amazon linux (tested on `2017.09.d`) + - Amazon linux (tested on `2017.09.d`) (needs `yum install kernel-devel`) - Running within docker ## Not Supported diff --git a/scripts/benchmark/1.4.1-cxx11abi/.travis.yml b/scripts/benchmark/1.4.1-cxx11abi/.travis.yml new file mode 100644 index 000000000..82a92578a --- /dev/null +++ b/scripts/benchmark/1.4.1-cxx11abi/.travis.yml @@ -0,0 +1,28 @@ +language: cpp + +sudo: false + +compiler: clang + +addons: + apt: + update: true + sources: [ 'george-edison55-precise-backports' ] + packages: [ 'cmake', 'cmake-data' ] + +matrix: + exclude: + - os: linux + include: + - os: osx + osx_image: xcode9.3 + env: MASON_PLATFORM=osx + - os: linux + dist: trusty + env: MASON_PLATFORM=linux + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/benchmark/1.4.1-cxx11abi/script.sh b/scripts/benchmark/1.4.1-cxx11abi/script.sh new file mode 100755 index 000000000..016817b84 --- /dev/null +++ b/scripts/benchmark/1.4.1-cxx11abi/script.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +LIB_VERSION=1.4.1 + +MASON_NAME=benchmark +MASON_VERSION=${LIB_VERSION}-cxx11abi +MASON_LIB_FILE=lib/libbenchmark.a + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/google/benchmark/archive/v${LIB_VERSION}.tar.gz \ + 3b750ed1ee9f8fd88efb868060786449498e967e + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/benchmark-${LIB_VERSION} +} + +function mason_compile { + rm -rf build + mkdir -p build + cd build + cmake \ + ${MASON_CMAKE_TOOLCHAIN:-} \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX="${MASON_PREFIX}" \ + -DCMAKE_CXX_FLAGS="${CXXFLAGS:-} -D_GLIBCXX_USE_CXX11_ABI=1" \ + -DBENCHMARK_ENABLE_LTO=ON \ + -DBENCHMARK_ENABLE_TESTING=OFF \ + .. + + VERBOSE=1 make install -j${MASON_CONCURRENCY} +} + +function mason_cflags { + echo -isystem ${MASON_PREFIX}/include +} + +function mason_ldflags { + : +} + +function mason_static_libs { + echo ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/benchmark/1.4.1/.travis.yml b/scripts/benchmark/1.4.1/.travis.yml new file mode 100644 index 000000000..ab8a603c3 --- /dev/null +++ b/scripts/benchmark/1.4.1/.travis.yml @@ -0,0 +1,29 @@ +language: cpp + +sudo: false + +compiler: clang + +addons: + apt: + update: true + sources: [ 'george-edison55-precise-backports' ] + packages: [ 'cmake', 'cmake-data' ] + +matrix: + exclude: + - os: linux + include: + - os: osx + osx_image: xcode9.3 + env: MASON_PLATFORM=osx + - os: linux + dist: trusty + env: MASON_PLATFORM=linux + +script: +- if [[ ${MASON_PLATFORM} == "linux" ]]; then sudo perl -i -p -e "s/# define _GLIBCXX_USE_DUAL_ABI 0/# define _GLIBCXX_USE_DUAL_ABI 1/g;" /usr/include/x86_64-linux-gnu/c++/5/bits/c++config.h ; fi +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/benchmark/1.4.1/script.sh b/scripts/benchmark/1.4.1/script.sh new file mode 100755 index 000000000..6dd6d3896 --- /dev/null +++ b/scripts/benchmark/1.4.1/script.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +MASON_NAME=benchmark +MASON_VERSION=1.4.1 
+MASON_LIB_FILE=lib/libbenchmark.a + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/google/benchmark/archive/v${MASON_VERSION}.tar.gz \ + 3b750ed1ee9f8fd88efb868060786449498e967e + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/benchmark-${MASON_VERSION} +} + +function mason_compile { + rm -rf build + mkdir -p build + cd build + cmake \ + ${MASON_CMAKE_TOOLCHAIN:-} \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX="${MASON_PREFIX}" \ + -DBENCHMARK_ENABLE_LTO=ON \ + -DBENCHMARK_ENABLE_TESTING=OFF \ + .. + + VERBOSE=1 make install -j${MASON_CONCURRENCY} +} + +function mason_cflags { + echo -isystem ${MASON_PREFIX}/include +} + +function mason_ldflags { + : +} + +function mason_static_libs { + echo ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/binutils/2.26/script.sh b/scripts/binutils/2.26/script.sh index d485fc884..7401e8435 100755 --- a/scripts/binutils/2.26/script.sh +++ b/scripts/binutils/2.26/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/ld function mason_load_source { mason_download \ - http://ftp.gnu.org/gnu/binutils/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ + https://ftp.gnu.org/gnu/binutils/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ 05b22d6ef8003e76f7d05500363a3ee8cc66a7ae mason_extract_tar_bz2 diff --git a/scripts/binutils/2.27/script.sh b/scripts/binutils/2.27/script.sh index 3149b0c7e..d55d81fe7 100755 --- a/scripts/binutils/2.27/script.sh +++ b/scripts/binutils/2.27/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libbfd.a function mason_load_source { mason_download \ - http://ftp.gnu.org/gnu/binutils/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ + https://ftp.gnu.org/gnu/binutils/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ 7e62c56ea660080882af2c8644d566765a77a0b8 mason_extract_tar_bz2 diff --git a/scripts/binutils/2.28/script.sh b/scripts/binutils/2.28/script.sh index 0e9efa24f..aa5752ef1 100755 --- a/scripts/binutils/2.28/script.sh +++ 
b/scripts/binutils/2.28/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libbfd.a function mason_load_source { mason_download \ - http://ftp.gnu.org/gnu/binutils/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ + https://ftp.gnu.org/gnu/binutils/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ bea61d1a33e4ed8061f1936ef00a633c7fff096e mason_extract_tar_bz2 diff --git a/scripts/binutils/2.30/script.sh b/scripts/binutils/2.30/script.sh index e8bae5bae..0d02276e8 100755 --- a/scripts/binutils/2.30/script.sh +++ b/scripts/binutils/2.30/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libbfd.a function mason_load_source { mason_download \ - http://ftp.gnu.org/gnu/binutils/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ + https://ftp.gnu.org/gnu/binutils/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ 9bbe52758d123de00bce46d7c657d10aad97bf38 mason_extract_tar_bz2 diff --git a/scripts/binutils/2.31/.travis.yml b/scripts/binutils/2.31/.travis.yml new file mode 100644 index 000000000..052507de1 --- /dev/null +++ b/scripts/binutils/2.31/.travis.yml @@ -0,0 +1,20 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + - bison + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/binutils/2.31/script.sh b/scripts/binutils/2.31/script.sh new file mode 100755 index 000000000..4d4191406 --- /dev/null +++ b/scripts/binutils/2.31/script.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +MASON_NAME=binutils +MASON_VERSION=2.31 +MASON_LIB_FILE=lib/libbfd.a + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://ftp.gnu.org/gnu/binutils/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ + 602856fd5af10a09a123ae25dfb86e3b436549cf + + mason_extract_tar_bz2 + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + # Add optimization flags since CFLAGS overrides the default (-g -O2) + export CFLAGS="${CFLAGS} -O3 -DNDEBUG -Wno-c++11-narrowing" + export CXXFLAGS="${CXXFLAGS} -O3 -DNDEBUG -Wno-c++11-narrowing" + ./configure \ + --prefix=${MASON_PREFIX} \ + --enable-gold \ + --enable-plugins \ + --enable-static \ + --disable-shared \ + --disable-dependency-tracking + + make -j${MASON_CONCURRENCY} + make install + cp include/libiberty.h ${MASON_PREFIX}/include/ + cp libiberty/libiberty.a ${MASON_PREFIX}/lib/ +} + +function mason_cflags { + echo -I${MASON_PREFIX}/include +} + +function mason_ldflags { + : +} + +function mason_static_libs { + echo ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/binutils/2.35/.travis.yml b/scripts/binutils/2.35/.travis.yml new file mode 100644 index 000000000..052507de1 --- /dev/null +++ b/scripts/binutils/2.35/.travis.yml @@ -0,0 +1,20 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + - bison + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/binutils/2.35/script.sh b/scripts/binutils/2.35/script.sh new file mode 100755 index 000000000..2280251ae --- /dev/null +++ b/scripts/binutils/2.35/script.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +MASON_NAME=binutils +MASON_VERSION=2.35 +MASON_LIB_FILE=lib/libbfd.a + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://ftp.gnu.org/gnu/binutils/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ + ba17256adeca65b62515b3f714ffdf55434c981f + + mason_extract_tar_bz2 + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + # Add optimization flags since CFLAGS overrides the default (-g -O2) + export CFLAGS="${CFLAGS} -O3 -DNDEBUG -Wno-c++11-narrowing" + export CXXFLAGS="${CXXFLAGS} -O3 -DNDEBUG -Wno-c++11-narrowing" + ./configure \ + --prefix=${MASON_PREFIX} \ + --enable-gold \ + --enable-plugins \ + --enable-static \ + --disable-shared \ + --disable-dependency-tracking + + make -j${MASON_CONCURRENCY} + make install + cp include/libiberty.h ${MASON_PREFIX}/include/ + cp libiberty/libiberty.a ${MASON_PREFIX}/lib/ +} + +function mason_cflags { + echo -I${MASON_PREFIX}/include +} + +function mason_ldflags { + : +} + +function mason_static_libs { + echo ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/boost/1.57.0/script.sh b/scripts/boost/1.57.0/script.sh index 4cc53c06e..efc1e6c90 100755 --- a/scripts/boost/1.57.0/script.sh +++ b/scripts/boost/1.57.0/script.sh @@ -10,7 +10,7 @@ BOOST_ROOT=${MASON_PREFIX} function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/1.57.0/boost_1_57_0.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/1.57.0/boost_1_57_0.tar.bz2 \ 397306fa6d0858c4885fbba7d43a0164dcb7f53e mason_extract_tar_bz2 boost_1_57_0/boost diff --git a/scripts/boost/1.58.0/script.sh b/scripts/boost/1.58.0/script.sh index cff5ed8dd..d6a7af91a 100755 --- a/scripts/boost/1.58.0/script.sh +++ b/scripts/boost/1.58.0/script.sh @@ -10,7 +10,7 @@ BOOST_ROOT=${MASON_PREFIX} function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/1.58.0/boost_1_58_0.tar.bz2 \ + 
https://downloads.sourceforge.net/project/boost/boost/1.58.0/boost_1_58_0.tar.bz2 \ 43e46651e762e4daf72a5d21dca86ae151e65378 mason_extract_tar_bz2 boost_1_58_0/boost diff --git a/scripts/boost/1.59.0/script.sh b/scripts/boost/1.59.0/script.sh index 20a2cbbb6..a972a2a1d 100755 --- a/scripts/boost/1.59.0/script.sh +++ b/scripts/boost/1.59.0/script.sh @@ -10,7 +10,7 @@ BOOST_ROOT=${MASON_PREFIX} function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/1.59.0/boost_1_59_0.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/1.59.0/boost_1_59_0.tar.bz2 \ ff2e48f4d7e3c4b393d41e07a2f5d923b990967d mason_extract_tar_bz2 boost_1_59_0/boost diff --git a/scripts/boost/1.60.0/script.sh b/scripts/boost/1.60.0/script.sh index e33e4b6f0..eb0ba1e11 100755 --- a/scripts/boost/1.60.0/script.sh +++ b/scripts/boost/1.60.0/script.sh @@ -10,7 +10,7 @@ BOOST_ROOT=${MASON_PREFIX} function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/1.60.0/boost_1_60_0.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/1.60.0/boost_1_60_0.tar.bz2 \ 40a65135d34c3e3a3cdbe681f06745c086e5b941 mason_extract_tar_bz2 boost_1_60_0/boost diff --git a/scripts/boost/1.61.0/base.sh b/scripts/boost/1.61.0/base.sh index 91aaa7f5f..543f33d55 100644 --- a/scripts/boost/1.61.0/base.sh +++ b/scripts/boost/1.61.0/base.sh @@ -4,7 +4,7 @@ export MASON_VERSION=1.61.0 export BOOST_VERSION=${MASON_VERSION//./_} -export BOOST_TOOLSET=$(basename ${CC}) -export BOOST_TOOLSET_CXX=$(basename ${CXX}) +export BOOST_TOOLSET=$(CC=${CC#ccache }; basename -- ${CC%% *}) +export BOOST_TOOLSET_CXX=$(CXX=${CXX#ccache }; basename -- ${CXX%% *}) export BOOST_ARCH="x86" export BOOST_SHASUM=0a72c541e468d76a957adc14e54688dd695d566f diff --git a/scripts/boost/1.61.0/script.sh b/scripts/boost/1.61.0/script.sh index 22f3d3506..b2328c639 100755 --- a/scripts/boost/1.61.0/script.sh +++ b/scripts/boost/1.61.0/script.sh @@ -19,7 +19,7 @@ 
source ${HERE}/common.sh # override default unpacking to just unpack headers function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${MASON_VERSION}/boost_${BOOST_VERSION}.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/${MASON_VERSION}/boost_${BOOST_VERSION}.tar.bz2 \ ${BOOST_SHASUM} mason_extract_tar_bz2 boost_${BOOST_VERSION}/boost diff --git a/scripts/boost/1.62.0/base.sh b/scripts/boost/1.62.0/base.sh index a48ce0a0d..c40536be1 100644 --- a/scripts/boost/1.62.0/base.sh +++ b/scripts/boost/1.62.0/base.sh @@ -4,8 +4,8 @@ export MASON_VERSION=1.62.0 export BOOST_VERSION=${MASON_VERSION//./_} -export BOOST_TOOLSET=$(basename ${CC}) -export BOOST_TOOLSET_CXX=$(basename ${CXX}) +export BOOST_TOOLSET=$(CC=${CC#ccache }; basename -- ${CC%% *}) +export BOOST_TOOLSET_CXX=$(CXX=${CXX#ccache }; basename -- ${CXX%% *}) export BOOST_ARCH="x86" export BOOST_SHASUM=f4151eec3e9394146b7bebcb17b83149de0a6c23 # special override to ensure each library shares the cached download diff --git a/scripts/boost/1.62.0/script.sh b/scripts/boost/1.62.0/script.sh index 2790bbe9a..d6b8d017d 100755 --- a/scripts/boost/1.62.0/script.sh +++ b/scripts/boost/1.62.0/script.sh @@ -18,7 +18,7 @@ source ${HERE}/common.sh # override default unpacking to just unpack headers function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${MASON_VERSION}/boost_${BOOST_VERSION}.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/${MASON_VERSION}/boost_${BOOST_VERSION}.tar.bz2 \ ${BOOST_SHASUM} mason_extract_tar_bz2 boost_${BOOST_VERSION}/boost diff --git a/scripts/boost/1.63.0/base.sh b/scripts/boost/1.63.0/base.sh index 5b6bd0a8e..3790c7e97 100644 --- a/scripts/boost/1.63.0/base.sh +++ b/scripts/boost/1.63.0/base.sh @@ -4,8 +4,8 @@ export MASON_VERSION=1.63.0 export BOOST_VERSION=${MASON_VERSION//./_} -export BOOST_TOOLSET=$(basename ${CC}) -export BOOST_TOOLSET_CXX=$(basename ${CXX}) 
+export BOOST_TOOLSET=$(CC=${CC#ccache }; basename -- ${CC%% *}) +export BOOST_TOOLSET_CXX=$(CXX=${CXX#ccache }; basename -- ${CXX%% *}) export BOOST_ARCH="x86" export BOOST_SHASUM=5c5cf0fd35a5950ed9e00ba54153df47747803f9 # special override to ensure each library shares the cached download diff --git a/scripts/boost/1.63.0/script.sh b/scripts/boost/1.63.0/script.sh index 2790bbe9a..d6b8d017d 100755 --- a/scripts/boost/1.63.0/script.sh +++ b/scripts/boost/1.63.0/script.sh @@ -18,7 +18,7 @@ source ${HERE}/common.sh # override default unpacking to just unpack headers function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${MASON_VERSION}/boost_${BOOST_VERSION}.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/${MASON_VERSION}/boost_${BOOST_VERSION}.tar.bz2 \ ${BOOST_SHASUM} mason_extract_tar_bz2 boost_${BOOST_VERSION}/boost diff --git a/scripts/boost/1.64.0/base.sh b/scripts/boost/1.64.0/base.sh index b2528619f..f563660ac 100644 --- a/scripts/boost/1.64.0/base.sh +++ b/scripts/boost/1.64.0/base.sh @@ -4,8 +4,8 @@ export MASON_VERSION=1.64.0 export BOOST_VERSION=${MASON_VERSION//./_} -export BOOST_TOOLSET=$(basename ${CC}) -export BOOST_TOOLSET_CXX=$(basename ${CXX}) +export BOOST_TOOLSET=$(CC=${CC#ccache }; basename -- ${CC%% *}) +export BOOST_TOOLSET_CXX=$(CXX=${CXX#ccache }; basename -- ${CXX%% *}) export BOOST_ARCH="x86" export BOOST_SHASUM=6e4dad39f14937af73ace20d2279e2468aad14d8 # special override to ensure each library shares the cached download diff --git a/scripts/boost/1.64.0/script.sh b/scripts/boost/1.64.0/script.sh index 2790bbe9a..d6b8d017d 100755 --- a/scripts/boost/1.64.0/script.sh +++ b/scripts/boost/1.64.0/script.sh @@ -18,7 +18,7 @@ source ${HERE}/common.sh # override default unpacking to just unpack headers function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${MASON_VERSION}/boost_${BOOST_VERSION}.tar.bz2 \ + 
https://downloads.sourceforge.net/project/boost/boost/${MASON_VERSION}/boost_${BOOST_VERSION}.tar.bz2 \ ${BOOST_SHASUM} mason_extract_tar_bz2 boost_${BOOST_VERSION}/boost diff --git a/scripts/boost/1.65.1/base.sh b/scripts/boost/1.65.1/base.sh index 3b5bb3827..725bbaa37 100644 --- a/scripts/boost/1.65.1/base.sh +++ b/scripts/boost/1.65.1/base.sh @@ -4,8 +4,8 @@ export MASON_VERSION=1.65.1 export BOOST_VERSION=${MASON_VERSION//./_} -export BOOST_TOOLSET=$(basename ${CC}) -export BOOST_TOOLSET_CXX=$(basename ${CXX}) +export BOOST_TOOLSET=$(CC=${CC#ccache }; basename -- ${CC%% *}) +export BOOST_TOOLSET_CXX=$(CXX=${CXX#ccache }; basename -- ${CXX%% *}) export BOOST_ARCH="x86" export BOOST_SHASUM=094a03dd6f07e740719b944cfe01a278f5326315 # special override to ensure each library shares the cached download diff --git a/scripts/boost/1.65.1/script.sh b/scripts/boost/1.65.1/script.sh index 2790bbe9a..d6b8d017d 100755 --- a/scripts/boost/1.65.1/script.sh +++ b/scripts/boost/1.65.1/script.sh @@ -18,7 +18,7 @@ source ${HERE}/common.sh # override default unpacking to just unpack headers function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${MASON_VERSION}/boost_${BOOST_VERSION}.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/${MASON_VERSION}/boost_${BOOST_VERSION}.tar.bz2 \ ${BOOST_SHASUM} mason_extract_tar_bz2 boost_${BOOST_VERSION}/boost diff --git a/scripts/boost/1.66.0/base.sh b/scripts/boost/1.66.0/base.sh index f30b1a07c..5d1f034fc 100644 --- a/scripts/boost/1.66.0/base.sh +++ b/scripts/boost/1.66.0/base.sh @@ -4,8 +4,8 @@ export MASON_VERSION=1.66.0 export BOOST_VERSION=${MASON_VERSION//./_} -export BOOST_TOOLSET=$(basename ${CC}) -export BOOST_TOOLSET_CXX=$(basename ${CXX}) +export BOOST_TOOLSET=$(CC=${CC#ccache }; basename -- ${CC%% *}) +export BOOST_TOOLSET_CXX=$(CXX=${CXX#ccache }; basename -- ${CXX%% *}) export BOOST_ARCH="x86" export BOOST_SHASUM=5552748d2f0aede9ad1dfbb7f16832bbb054ca4d # special 
override to ensure each library shares the cached download diff --git a/scripts/boost/1.66.0/script.sh b/scripts/boost/1.66.0/script.sh index 2790bbe9a..d6b8d017d 100755 --- a/scripts/boost/1.66.0/script.sh +++ b/scripts/boost/1.66.0/script.sh @@ -18,7 +18,7 @@ source ${HERE}/common.sh # override default unpacking to just unpack headers function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${MASON_VERSION}/boost_${BOOST_VERSION}.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/${MASON_VERSION}/boost_${BOOST_VERSION}.tar.bz2 \ ${BOOST_SHASUM} mason_extract_tar_bz2 boost_${BOOST_VERSION}/boost diff --git a/scripts/boost/1.67.0/.travis.yml b/scripts/boost/1.67.0/.travis.yml new file mode 100644 index 000000000..15c3ad555 --- /dev/null +++ b/scripts/boost/1.67.0/.travis.yml @@ -0,0 +1,10 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost/1.67.0/base.sh b/scripts/boost/1.67.0/base.sh new file mode 100644 index 000000000..0d4080605 --- /dev/null +++ b/scripts/boost/1.67.0/base.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +# NOTE: use the ./utils/new_boost.sh script to create new versions + +export MASON_VERSION=1.67.0 +export BOOST_VERSION=${MASON_VERSION//./_} +export BOOST_TOOLSET=$(CC=${CC#ccache }; basename -- ${CC%% *}) +export BOOST_TOOLSET_CXX=$(CXX=${CXX#ccache }; basename -- ${CXX%% *}) +export BOOST_ARCH="x86" +export BOOST_SHASUM=6dde6a5f874a5dfa75865e4430ff9248a43cab07 +# special override to ensure each library shares the cached download +export MASON_DOWNLOAD_SLUG="boost-${MASON_VERSION}" diff --git a/scripts/boost/1.67.0/common.sh b/scripts/boost/1.67.0/common.sh new file mode 100644 index 000000000..70c45d38a --- /dev/null +++ b/scripts/boost/1.67.0/common.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash + +function mason_load_source { + 
mason_download \ + http://downloads.sourceforge.net/project/boost/boost/${MASON_VERSION}/boost_${BOOST_VERSION}.tar.bz2 \ + ${BOOST_SHASUM} + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION} + + mason_extract_tar_bz2 +} + +function gen_config() { + echo "using $1 : : $(which $2)" > user-config.jam + if [[ "${AR:-false}" != false ]] || [[ "${RANLIB:-false}" != false ]]; then + echo ' : ' >> user-config.jam + if [[ "${AR:-false}" != false ]]; then + echo "${AR} " >> user-config.jam + fi + if [[ "${RANLIB:-false}" != false ]]; then + echo "${RANLIB} " >> user-config.jam + fi + fi + echo ' ;' >> user-config.jam +} + +function mason_compile { + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! -f ./b2 ]] ; then + ./bootstrap.sh + fi + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -d0 \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +function mason_prefix { + echo "${MASON_PREFIX}" +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + local LOCAL_LDFLAGS + LOCAL_LDFLAGS="-L${MASON_PREFIX}/lib" + if [[ ${BOOST_LIBRARY:-false} != false ]]; then + LOCAL_LDFLAGS="${LOCAL_LDFLAGS} -lboost_${BOOST_LIBRARY}" + fi + echo $LOCAL_LDFLAGS +} diff --git a/scripts/boost/1.67.0/script.sh b/scripts/boost/1.67.0/script.sh new file mode 100755 index 000000000..d6b8d017d --- /dev/null +++ b/scripts/boost/1.67.0/script.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# inherit from boost base (used for all boost library packages) +source ${HERE}/base.sh + +# this package is the one that is header-only 
+MASON_NAME=boost +MASON_HEADER_ONLY=true + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${HERE}/common.sh + +# override default unpacking to just unpack headers +function mason_load_source { + mason_download \ + https://downloads.sourceforge.net/project/boost/boost/${MASON_VERSION}/boost_${BOOST_VERSION}.tar.bz2 \ + ${BOOST_SHASUM} + + mason_extract_tar_bz2 boost_${BOOST_VERSION}/boost + + MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION} +} + +# override default "compile" target for just the header install +function mason_compile { + mkdir -p ${MASON_PREFIX}/include + cp -r ${MASON_ROOT}/.build/boost_${BOOST_VERSION}/boost ${MASON_PREFIX}/include + + # work around NDK bug https://code.google.com/p/android/issues/detail?id=79483 + + patch ${MASON_PREFIX}/include/boost/core/demangle.hpp <<< "19a20,21 +> #if !defined(__ANDROID__) +> +25a28,29 +> #endif +> +" + + # work around https://github.com/Project-OSRM/node-osrm/issues/191 + patch ${MASON_PREFIX}/include/boost/interprocess/detail/os_file_functions.hpp <<< "471c471 +< return ::open(name, (int)mode); +--- +> return ::open(name, (int)mode,S_IRUSR|S_IWUSR); +" + +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/boost/1.72.0/.travis.yml b/scripts/boost/1.72.0/.travis.yml new file mode 100644 index 000000000..15c3ad555 --- /dev/null +++ b/scripts/boost/1.72.0/.travis.yml @@ -0,0 +1,10 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost/1.72.0/base.sh b/scripts/boost/1.72.0/base.sh new file mode 100644 index 000000000..90b73aed9 --- /dev/null +++ b/scripts/boost/1.72.0/base.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +# NOTE: use the ./utils/new_boost.sh script to create new versions + +export MASON_VERSION=1.72.0 +export 
BOOST_VERSION=${MASON_VERSION//./_} +export BOOST_TOOLSET=$(CC=${CC#ccache }; basename -- ${CC%% *}) +export BOOST_TOOLSET_CXX=$(CXX=${CXX#ccache }; basename -- ${CXX%% *}) +export BOOST_ARCH="x86" +export BOOST_SHASUM=c3682fdf07771570c1a55d992d9fde221ef14131 +# special override to ensure each library shares the cached download +export MASON_DOWNLOAD_SLUG="boost-${MASON_VERSION}" diff --git a/scripts/boost/1.72.0/common.sh b/scripts/boost/1.72.0/common.sh new file mode 100644 index 000000000..70c45d38a --- /dev/null +++ b/scripts/boost/1.72.0/common.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash + +function mason_load_source { + mason_download \ + https://downloads.sourceforge.net/project/boost/boost/${MASON_VERSION}/boost_${BOOST_VERSION}.tar.bz2 \ + ${BOOST_SHASUM} + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION} + + mason_extract_tar_bz2 +} + +function gen_config() { + echo "using $1 : : $(which $2)" > user-config.jam + if [[ "${AR:-false}" != false ]] || [[ "${RANLIB:-false}" != false ]]; then + echo ' : ' >> user-config.jam + if [[ "${AR:-false}" != false ]]; then + echo "${AR} " >> user-config.jam + fi + if [[ "${RANLIB:-false}" != false ]]; then + echo "${RANLIB} " >> user-config.jam + fi + fi + echo ' ;' >> user-config.jam +} + +function mason_compile { + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ !
-f ./b2 ]] ; then + ./bootstrap.sh + fi + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -d0 \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +function mason_prefix { + echo "${MASON_PREFIX}" +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + local LOCAL_LDFLAGS + LOCAL_LDFLAGS="-L${MASON_PREFIX}/lib" + if [[ ${BOOST_LIBRARY:-false} != false ]]; then + LOCAL_LDFLAGS="${LOCAL_LDFLAGS} -lboost_${BOOST_LIBRARY}" + fi + echo $LOCAL_LDFLAGS +} diff --git a/scripts/boost/1.72.0/script.sh b/scripts/boost/1.72.0/script.sh new file mode 100755 index 000000000..d6b8d017d --- /dev/null +++ b/scripts/boost/1.72.0/script.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# inherit from boost base (used for all boost library packages) +source ${HERE}/base.sh + +# this package is the one that is header-only +MASON_NAME=boost +MASON_HEADER_ONLY=true + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${HERE}/common.sh + +# override default unpacking to just unpack headers +function mason_load_source { + mason_download \ + https://downloads.sourceforge.net/project/boost/boost/${MASON_VERSION}/boost_${BOOST_VERSION}.tar.bz2 \ + ${BOOST_SHASUM} + + mason_extract_tar_bz2 boost_${BOOST_VERSION}/boost + + MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION} +} + +# override default "compile" target for just the header install +function mason_compile { + mkdir -p ${MASON_PREFIX}/include + cp -r ${MASON_ROOT}/.build/boost_${BOOST_VERSION}/boost ${MASON_PREFIX}/include + + # work around NDK bug https://code.google.com/p/android/issues/detail?id=79483 + + patch ${MASON_PREFIX}/include/boost/core/demangle.hpp <<< "19a20,21 +> #if !defined(__ANDROID__) +> +25a28,29 +> #endif +> +" + + # work around https://github.com/Project-OSRM/node-osrm/issues/191 + patch ${MASON_PREFIX}/include/boost/interprocess/detail/os_file_functions.hpp <<< "471c471 +< return ::open(name, (int)mode); +--- +> return ::open(name, (int)mode,S_IRUSR|S_IWUSR); +" + +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/boost/1.73.0/.travis.yml b/scripts/boost/1.73.0/.travis.yml new file mode 100644 index 000000000..3d577c6b8 --- /dev/null +++ b/scripts/boost/1.73.0/.travis.yml @@ -0,0 +1,7 @@ +jobs: + include: + - os: linux + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost/1.73.0/base.sh b/scripts/boost/1.73.0/base.sh new file mode 100644 index 000000000..ca71529e6 --- /dev/null +++ b/scripts/boost/1.73.0/base.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +# NOTE: use the ./utils/new_boost.sh script to create new versions + +export MASON_VERSION=1.73.0 +export BOOST_VERSION=${MASON_VERSION//./_} +export BOOST_TOOLSET=$(CC=${CC#ccache }; basename -- ${CC%% *}) +export 
BOOST_TOOLSET_CXX=$(CXX=${CXX#ccache }; basename -- ${CXX%% *}) +export BOOST_ARCH="x86" +export BOOST_SHASUM=a8e153013be01809f26362c43478b4763cc21010 +# special override to ensure each library shares the cached download +export MASON_DOWNLOAD_SLUG="boost-${MASON_VERSION}" diff --git a/scripts/boost/1.73.0/common.sh b/scripts/boost/1.73.0/common.sh new file mode 100644 index 000000000..ab21134d1 --- /dev/null +++ b/scripts/boost/1.73.0/common.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash + +function mason_load_source { + mason_download \ + https://dl.bintray.com/boostorg/release/${MASON_VERSION}/source/boost_${BOOST_VERSION}.tar.bz2 \ + ${BOOST_SHASUM} + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION} + + mason_extract_tar_bz2 +} + +function gen_config() { + echo "using $1 : : $(which $2)" > user-config.jam + if [[ "${AR:-false}" != false ]] || [[ "${RANLIB:-false}" != false ]]; then + echo ' : ' >> user-config.jam + if [[ "${AR:-false}" != false ]]; then + echo "${AR} " >> user-config.jam + fi + if [[ "${RANLIB:-false}" != false ]]; then + echo "${RANLIB} " >> user-config.jam + fi + fi + echo ' ;' >> user-config.jam +} + +function mason_compile { + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! 
-f ./b2 ]] ; then + ./bootstrap.sh + fi + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -d0 \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +function mason_prefix { + echo "${MASON_PREFIX}" +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + local LOCAL_LDFLAGS + LOCAL_LDFLAGS="-L${MASON_PREFIX}/lib" + if [[ ${BOOST_LIBRARY:-false} != false ]]; then + LOCAL_LDFLAGS="${LOCAL_LDFLAGS} -lboost_${BOOST_LIBRARY}" + fi + echo $LOCAL_LDFLAGS +} diff --git a/scripts/boost/1.73.0/script.sh b/scripts/boost/1.73.0/script.sh new file mode 100755 index 000000000..f8907a4e1 --- /dev/null +++ b/scripts/boost/1.73.0/script.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# inherit from boost base (used for all boost library packages) +source ${HERE}/base.sh + +# this package is the one that is header-only +MASON_NAME=boost +MASON_HEADER_ONLY=true + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${HERE}/common.sh + +# override default unpacking to just unpack headers +function mason_load_source { + mason_download \ + https://dl.bintray.com/boostorg/release/${MASON_VERSION}/source/boost_${BOOST_VERSION}.tar.bz2 \ + ${BOOST_SHASUM} + + mason_extract_tar_bz2 boost_${BOOST_VERSION}/boost + + MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION} +} + +# override default "compile" target for just the header install +function mason_compile { + mkdir -p ${MASON_PREFIX}/include + cp -r ${MASON_ROOT}/.build/boost_${BOOST_VERSION}/boost ${MASON_PREFIX}/include +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/boost/1.74.0/.travis.yml b/scripts/boost/1.74.0/.travis.yml new file mode 100644 index 000000000..3d577c6b8 --- /dev/null +++ b/scripts/boost/1.74.0/.travis.yml @@ -0,0 +1,7 @@ +jobs: + include: + - os: linux + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost/1.74.0/base.sh b/scripts/boost/1.74.0/base.sh new file mode 100644 index 000000000..99360b1cb --- /dev/null +++ b/scripts/boost/1.74.0/base.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +# NOTE: use the ./utils/new_boost.sh script to create new versions + +export MASON_VERSION=1.74.0 +export BOOST_VERSION=${MASON_VERSION//./_} +export BOOST_TOOLSET=$(CC=${CC#ccache }; basename -- ${CC%% *}) +export BOOST_TOOLSET_CXX=$(CXX=${CXX#ccache }; basename -- ${CXX%% *}) +export BOOST_ARCH="x86" +export BOOST_SHASUM=6267038c609fc972b69d2ed03b1dc958f4e6a96d +# special override to ensure each library shares the cached download +export MASON_DOWNLOAD_SLUG="boost-${MASON_VERSION}" diff --git a/scripts/boost/1.74.0/common.sh b/scripts/boost/1.74.0/common.sh new file mode 100644 index 000000000..ab21134d1 --- /dev/null +++ b/scripts/boost/1.74.0/common.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash + +function 
mason_load_source { + mason_download \ + https://dl.bintray.com/boostorg/release/${MASON_VERSION}/source/boost_${BOOST_VERSION}.tar.bz2 \ + ${BOOST_SHASUM} + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION} + + mason_extract_tar_bz2 +} + +function gen_config() { + echo "using $1 : : $(which $2)" > user-config.jam + if [[ "${AR:-false}" != false ]] || [[ "${RANLIB:-false}" != false ]]; then + echo ' : ' >> user-config.jam + if [[ "${AR:-false}" != false ]]; then + echo "${AR} " >> user-config.jam + fi + if [[ "${RANLIB:-false}" != false ]]; then + echo "${RANLIB} " >> user-config.jam + fi + fi + echo ' ;' >> user-config.jam +} + +function mason_compile { + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! -f ./b2 ]] ; then + ./bootstrap.sh + fi + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -d0 \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +function mason_prefix { + echo "${MASON_PREFIX}" +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + local LOCAL_LDFLAGS + LOCAL_LDFLAGS="-L${MASON_PREFIX}/lib" + if [[ ${BOOST_LIBRARY:-false} != false ]]; then + LOCAL_LDFLAGS="${LOCAL_LDFLAGS} -lboost_${BOOST_LIBRARY}" + fi + echo $LOCAL_LDFLAGS +} diff --git a/scripts/boost/1.74.0/patch.diff b/scripts/boost/1.74.0/patch.diff new file mode 100644 index 000000000..f9ca965a3 --- /dev/null +++ b/scripts/boost/1.74.0/patch.diff @@ -0,0 +1,437 @@ +diff --git a/boost/asio/execution/any_executor.hpp b/boost/asio/execution/any_executor.hpp +index 9c7018c..bb073f4 100644 +--- a/boost/asio/execution/any_executor.hpp ++++ b/boost/asio/execution/any_executor.hpp +@@ 
-620,7 +620,11 @@ public: + return static_cast(target_); + } + +- const std::type_info& target_type() const ++#if !defined(ASIO_NO_TYPEID) ++ const std::type_info& target_type() const ++#else // !defined(ASIO_NO_TYPEID) ++ const void* target_type() const ++#endif // !defined(ASIO_NO_TYPEID) + { + return target_fns_->target_type(); + } +@@ -800,16 +804,27 @@ protected: + + struct target_fns + { ++#if !defined(ASIO_NO_TYPEID) + const std::type_info& (*target_type)(); ++#else // !defined(ASIO_NO_TYPEID) ++ const void* (*target_type)(); ++#endif // !defined(ASIO_NO_TYPEID) + bool (*equal)(const any_executor_base&, const any_executor_base&); + void (*execute)(const any_executor_base&, BOOST_ASIO_MOVE_ARG(function)); + void (*blocking_execute)(const any_executor_base&, function_view); + }; + ++#if !defined(ASIO_NO_TYPEID) + static const std::type_info& target_type_void() + { + return typeid(void); + } ++#else // !defined(ASIO_NO_TYPEID) ++ static const void* target_type_void() ++ { ++ return 0; ++ } ++#endif // !defined(ASIO_NO_TYPEID) + + static bool equal_void(const any_executor_base&, const any_executor_base&) + { +@@ -845,11 +860,19 @@ protected: + return &fns; + } + ++#if !defined(ASIO_NO_TYPEID) + template + static const std::type_info& target_type_ex() + { + return typeid(Ex); + } ++#else // !defined(ASIO_NO_TYPEID) ++ template ++ static const void* target_type_ex() ++ { ++ return Ex::type_id(); ++ } ++#endif // !defined(ASIO_NO_TYPEID) + + template + static bool equal_ex(const any_executor_base& ex1, + +diff --git a/boost/property_tree/detail/ptree_implementation.hpp b/boost/property_tree/detail/ptree_implementation.hpp +index dd9fd37..71ce6b5 100644 +--- a/boost/property_tree/detail/ptree_implementation.hpp ++++ b/boost/property_tree/detail/ptree_implementation.hpp +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + + #if (defined(BOOST_MSVC) && \ +@@ -669,7 +670,8 @@ namespace boost { namespace property_tree + } + 
BOOST_PROPERTY_TREE_THROW(ptree_bad_data( + std::string("conversion of data to type \"") + +- typeid(Type).name() + "\" failed", data())); ++ boost::typeindex::type_id().pretty_name() + ++ "\" failed", data())); + } + + template +@@ -824,7 +826,8 @@ namespace boost { namespace property_tree + data() = *o; + } else { + BOOST_PROPERTY_TREE_THROW(ptree_bad_data( +- std::string("conversion of type \"") + typeid(Type).name() + ++ std::string("conversion of type \"") + ++ boost::typeindex::type_id().pretty_name() + + "\" to data failed", boost::any())); + } + } +diff --git a/boost/property_tree/detail/info_parser_read.hpp b/boost/property_tree/detail/info_parser_read.hpp +index 87ef2cd..c3446b4 100644 +--- a/boost/property_tree/detail/info_parser_read.hpp ++++ b/boost/property_tree/detail/info_parser_read.hpp +@@ -13,6 +13,8 @@ + #include "boost/property_tree/ptree.hpp" + #include "boost/property_tree/detail/info_parser_error.hpp" + #include "boost/property_tree/detail/info_parser_utils.hpp" ++#include "boost/core/ignore_unused.hpp" ++#include "boost/core/no_exceptions_support.hpp" + #include + #include + #include +@@ -210,7 +212,13 @@ namespace boost { namespace property_tree { namespace info_parser + std::stack stack; + stack.push(&pt); // Push root ptree on stack initially + +- try { ++ // When compiling without exception support there is no formal ++ // parameter "e" in the catch handler. Declaring a local variable ++ // here does not hurt and will be "used" to make the code in the ++ // handler compilable although the code will never be executed. 
++ info_parser_error e("", "", 0); ignore_unused(e); ++ ++ BOOST_TRY { + // While there are characters in the stream + while (stream.good()) { + // Read one line from stream +@@ -372,7 +380,7 @@ namespace boost { namespace property_tree { namespace info_parser + BOOST_PROPERTY_TREE_THROW(info_parser_error("unmatched {", "", 0)); + + } +- catch (info_parser_error &e) ++ BOOST_CATCH (info_parser_error &e) + { + // If line undefined rethrow error with correct filename and line + if (e.line() == 0) +@@ -383,6 +391,7 @@ namespace boost { namespace property_tree { namespace info_parser + BOOST_PROPERTY_TREE_THROW(e); + + } ++ BOOST_CATCH_END + + } + +diff --git a/boost/property_tree/detail/rapidxml.hpp b/boost/property_tree/detail/rapidxml.hpp +index 9e3d76a..e890feb 100644 +--- a/boost/property_tree/detail/rapidxml.hpp ++++ b/boost/property_tree/detail/rapidxml.hpp +@@ -28,7 +28,7 @@ + + #include // For std::exception + +-#define BOOST_PROPERTY_TREE_RAPIDXML_PARSE_ERROR(what, where) throw parse_error(what, where) ++#define BOOST_PROPERTY_TREE_RAPIDXML_PARSE_ERROR(what, where) boost::throw_exception(parse_error(what, where)) + + namespace boost { namespace property_tree { namespace detail {namespace rapidxml + { +diff --git a/boost/property_tree/detail/xml_parser_read_rapidxml.hpp b/boost/property_tree/detail/xml_parser_read_rapidxml.hpp +index 9c04219..a6b005a 100644 +--- a/boost/property_tree/detail/xml_parser_read_rapidxml.hpp ++++ b/boost/property_tree/detail/xml_parser_read_rapidxml.hpp +@@ -15,6 +15,8 @@ + #include + #include + #include ++#include ++#include + #include + + namespace boost { namespace property_tree { namespace xml_parser +@@ -101,7 +103,13 @@ namespace boost { namespace property_tree { namespace xml_parser + xml_parser_error("read error", filename, 0)); + v.push_back(0); // zero-terminate + +- try { ++ // When compiling without exception support there is no formal ++ // parameter "e" in the catch handler. 
Declaring a local variable ++ // here does not hurt and will be "used" to make the code in the ++ // handler compilable although the code will never be executed. ++ parse_error e(NULL, NULL); ignore_unused(e); ++ ++ BOOST_TRY { + // Parse using appropriate flags + const int f_tws = parse_normalize_whitespace + | parse_trim_whitespace; +@@ -131,12 +139,13 @@ namespace boost { namespace property_tree { namespace xml_parser + + // Swap local and result ptrees + pt.swap(local); +- } catch (parse_error &e) { ++ } BOOST_CATCH (parse_error &e) { + long line = static_cast( + std::count(&v.front(), e.where(), Ch('\n')) + 1); + BOOST_PROPERTY_TREE_THROW( + xml_parser_error(e.what(), filename, line)); + } ++ BOOST_CATCH_END + } + + } } } +diff --git a/boost/property_tree/info_parser.hpp b/boost/property_tree/info_parser.hpp +index 683ddad..abdc8a3 100644 +--- a/boost/property_tree/info_parser.hpp ++++ b/boost/property_tree/info_parser.hpp +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + + namespace boost { namespace property_tree { namespace info_parser +@@ -43,11 +44,12 @@ namespace boost { namespace property_tree { namespace info_parser + void read_info(std::basic_istream &stream, Ptree &pt, + const Ptree &default_ptree) + { +- try { ++ BOOST_TRY { + read_info(stream, pt); +- } catch(file_parser_error &) { ++ } BOOST_CATCH(file_parser_error &) { + pt = default_ptree; + } ++ BOOST_CATCH_END + } + + /** +@@ -87,11 +89,12 @@ namespace boost { namespace property_tree { namespace info_parser + const Ptree &default_ptree, + const std::locale &loc = std::locale()) + { +- try { ++ BOOST_TRY { + read_info(filename, pt, loc); +- } catch(file_parser_error &) { ++ } BOOST_CATCH(file_parser_error &) { + pt = default_ptree; + } ++ BOOST_CATCH_END + } + + /** +diff --git a/boost/property_tree/ini_parser.hpp b/boost/property_tree/ini_parser.hpp +index 50d3c97..5142dbf 100644 +--- a/boost/property_tree/ini_parser.hpp ++++ b/boost/property_tree/ini_parser.hpp +@@ 
-14,6 +14,8 @@ + #include + #include + #include ++#include ++#include + #include + #include + #include +@@ -165,13 +167,21 @@ namespace boost { namespace property_tree { namespace ini_parser + BOOST_PROPERTY_TREE_THROW(ini_parser_error( + "cannot open file", filename, 0)); + stream.imbue(loc); +- try { ++ ++ // When compiling without exception support there is no formal ++ // parameter "e" in the catch handler. Declaring a local variable ++ // here does not hurt and will be "used" to make the code in the ++ // handler compilable although the code will never be executed. ++ ini_parser_error e("", "", 0); ignore_unused(e); ++ ++ BOOST_TRY { + read_ini(stream, pt); + } +- catch (ini_parser_error &e) { ++ BOOST_CATCH (ini_parser_error &e) { + BOOST_PROPERTY_TREE_THROW(ini_parser_error( + e.message(), filename, e.line())); + } ++ BOOST_CATCH_END + } + + namespace detail +@@ -313,13 +323,21 @@ namespace boost { namespace property_tree { namespace ini_parser + BOOST_PROPERTY_TREE_THROW(ini_parser_error( + "cannot open file", filename, 0)); + stream.imbue(loc); +- try { ++ ++ // When compiling without exception support there is no formal ++ // parameter "e" in the catch handler. Declaring a local variable ++ // here does not hurt and will be "used" to make the code in the ++ // handler compilable although the code will never be executed. 
++ ini_parser_error e("", "", 0); ignore_unused(e); ++ ++ BOOST_TRY { + write_ini(stream, pt, flags); + } +- catch (ini_parser_error &e) { ++ BOOST_CATCH (ini_parser_error &e) { + BOOST_PROPERTY_TREE_THROW(ini_parser_error( + e.message(), filename, e.line())); + } ++ BOOST_CATCH_END + } + + } } } + +diff --git a/boost/property_tree/detail/info_parser_read.hpp b/boost/property_tree/detail/info_parser_read.hpp +index c3446b4..b46643a 100644 +--- a/boost/property_tree/detail/info_parser_read.hpp ++++ b/boost/property_tree/detail/info_parser_read.hpp +@@ -13,7 +13,6 @@ + #include "boost/property_tree/ptree.hpp" + #include "boost/property_tree/detail/info_parser_error.hpp" + #include "boost/property_tree/detail/info_parser_utils.hpp" +-#include "boost/core/ignore_unused.hpp" + #include "boost/core/no_exceptions_support.hpp" + #include + #include +@@ -212,12 +211,6 @@ namespace boost { namespace property_tree { namespace info_parser + std::stack stack; + stack.push(&pt); // Push root ptree on stack initially + +- // When compiling without exception support there is no formal +- // parameter "e" in the catch handler. Declaring a local variable +- // here does not hurt and will be "used" to make the code in the +- // handler compilable although the code will never be executed. 
+- info_parser_error e("", "", 0); ignore_unused(e); +- + BOOST_TRY { + // While there are characters in the stream + while (stream.good()) { +@@ -382,6 +375,7 @@ namespace boost { namespace property_tree { namespace info_parser + } + BOOST_CATCH (info_parser_error &e) + { ++ #ifndef BOOST_NO_EXCEPTIONS + // If line undefined rethrow error with correct filename and line + if (e.line() == 0) + { +@@ -389,7 +383,7 @@ namespace boost { namespace property_tree { namespace info_parser + } + else + BOOST_PROPERTY_TREE_THROW(e); +- ++ #endif + } + BOOST_CATCH_END + +diff --git a/boost/property_tree/detail/xml_parser_read_rapidxml.hpp b/boost/property_tree/detail/xml_parser_read_rapidxml.hpp +index a6b005a..b6f5820 100644 +--- a/boost/property_tree/detail/xml_parser_read_rapidxml.hpp ++++ b/boost/property_tree/detail/xml_parser_read_rapidxml.hpp +@@ -15,7 +15,6 @@ + #include + #include + #include +-#include + #include + #include + +@@ -103,12 +102,6 @@ namespace boost { namespace property_tree { namespace xml_parser + xml_parser_error("read error", filename, 0)); + v.push_back(0); // zero-terminate + +- // When compiling without exception support there is no formal +- // parameter "e" in the catch handler. Declaring a local variable +- // here does not hurt and will be "used" to make the code in the +- // handler compilable although the code will never be executed. 
+- parse_error e(NULL, NULL); ignore_unused(e); +- + BOOST_TRY { + // Parse using appropriate flags + const int f_tws = parse_normalize_whitespace +@@ -140,10 +133,12 @@ namespace boost { namespace property_tree { namespace xml_parser + // Swap local and result ptrees + pt.swap(local); + } BOOST_CATCH (parse_error &e) { ++ #ifndef BOOST_NO_EXCEPTIONS + long line = static_cast( + std::count(&v.front(), e.where(), Ch('\n')) + 1); + BOOST_PROPERTY_TREE_THROW( + xml_parser_error(e.what(), filename, line)); ++ #endif + } + BOOST_CATCH_END + } +diff --git a/boost/property_tree/ini_parser.hpp b/boost/property_tree/ini_parser.hpp +index 5142dbf..cb63fcc 100644 +--- a/boost/property_tree/ini_parser.hpp ++++ b/boost/property_tree/ini_parser.hpp +@@ -14,7 +14,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -168,18 +167,14 @@ namespace boost { namespace property_tree { namespace ini_parser + "cannot open file", filename, 0)); + stream.imbue(loc); + +- // When compiling without exception support there is no formal +- // parameter "e" in the catch handler. Declaring a local variable +- // here does not hurt and will be "used" to make the code in the +- // handler compilable although the code will never be executed. +- ini_parser_error e("", "", 0); ignore_unused(e); +- + BOOST_TRY { + read_ini(stream, pt); + } + BOOST_CATCH (ini_parser_error &e) { ++ #ifndef BOOST_NO_EXCEPTIONS + BOOST_PROPERTY_TREE_THROW(ini_parser_error( + e.message(), filename, e.line())); ++ #endif + } + BOOST_CATCH_END + } +@@ -324,18 +319,14 @@ namespace boost { namespace property_tree { namespace ini_parser + "cannot open file", filename, 0)); + stream.imbue(loc); + +- // When compiling without exception support there is no formal +- // parameter "e" in the catch handler. Declaring a local variable +- // here does not hurt and will be "used" to make the code in the +- // handler compilable although the code will never be executed. 
+- ini_parser_error e("", "", 0); ignore_unused(e); +- + BOOST_TRY { + write_ini(stream, pt, flags); + } + BOOST_CATCH (ini_parser_error &e) { ++ #ifndef BOOST_NO_EXCEPTIONS + BOOST_PROPERTY_TREE_THROW(ini_parser_error( + e.message(), filename, e.line())); ++ #endif + } + BOOST_CATCH_END + } diff --git a/scripts/boost/1.74.0/script.sh b/scripts/boost/1.74.0/script.sh new file mode 100755 index 000000000..0692f6b25 --- /dev/null +++ b/scripts/boost/1.74.0/script.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# inherit from boost base (used for all boost library packages) +source ${HERE}/base.sh + +# this package is the one that is header-only +MASON_NAME=boost +MASON_HEADER_ONLY=true + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${HERE}/common.sh + +# override default unpacking to just unpack headers +function mason_load_source { + mason_download \ + https://dl.bintray.com/boostorg/release/${MASON_VERSION}/source/boost_${BOOST_VERSION}.tar.bz2 \ + ${BOOST_SHASUM} + + mason_extract_tar_bz2 boost_${BOOST_VERSION}/boost + + MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION} +} + +# override default "compile" target for just the header install +function mason_compile { + patch -N -p1 < ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff + mkdir -p ${MASON_PREFIX}/include + cp -r ${MASON_ROOT}/.build/boost_${BOOST_VERSION}/boost ${MASON_PREFIX}/include +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/boost/1.75.0/.travis.yml b/scripts/boost/1.75.0/.travis.yml new file mode 100644 index 000000000..3d577c6b8 --- /dev/null +++ b/scripts/boost/1.75.0/.travis.yml @@ -0,0 +1,7 @@ +jobs: + include: + - os: linux + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost/1.75.0/base.sh 
b/scripts/boost/1.75.0/base.sh new file mode 100644 index 000000000..761318345 --- /dev/null +++ b/scripts/boost/1.75.0/base.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +# NOTE: use the ./utils/new_boost.sh script to create new versions + +export MASON_VERSION=1.75.0 +export BOOST_VERSION=${MASON_VERSION//./_} +export BOOST_TOOLSET=$(CC=${CC#ccache }; basename -- ${CC%% *}) +export BOOST_TOOLSET_CXX=$(CXX=${CXX#ccache }; basename -- ${CXX%% *}) +export BOOST_ARCH="x86" +export BOOST_SHASUM=1a5d6590555afdfada1428f1469ec2a8053e10b5 +# special override to ensure each library shares the cached download +export MASON_DOWNLOAD_SLUG="boost-${MASON_VERSION}" diff --git a/scripts/boost/1.75.0/common.sh b/scripts/boost/1.75.0/common.sh new file mode 100644 index 000000000..ab21134d1 --- /dev/null +++ b/scripts/boost/1.75.0/common.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash + +function mason_load_source { + mason_download \ + https://dl.bintray.com/boostorg/release/${MASON_VERSION}/source/boost_${BOOST_VERSION}.tar.bz2 \ + ${BOOST_SHASUM} + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION} + + mason_extract_tar_bz2 +} + +function gen_config() { + echo "using $1 : : $(which $2)" > user-config.jam + if [[ "${AR:-false}" != false ]] || [[ "${RANLIB:-false}" != false ]]; then + echo ' : ' >> user-config.jam + if [[ "${AR:-false}" != false ]]; then + echo "${AR} " >> user-config.jam + fi + if [[ "${RANLIB:-false}" != false ]]; then + echo "${RANLIB} " >> user-config.jam + fi + fi + echo ' ;' >> user-config.jam +} + +function mason_compile { + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! 
-f ./b2 ]] ; then + ./bootstrap.sh + fi + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -d0 \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +function mason_prefix { + echo "${MASON_PREFIX}" +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + local LOCAL_LDFLAGS + LOCAL_LDFLAGS="-L${MASON_PREFIX}/lib" + if [[ ${BOOST_LIBRARY:-false} != false ]]; then + LOCAL_LDFLAGS="${LOCAL_LDFLAGS} -lboost_${BOOST_LIBRARY}" + fi + echo $LOCAL_LDFLAGS +} diff --git a/scripts/boost/1.75.0/patch.diff b/scripts/boost/1.75.0/patch.diff new file mode 100644 index 000000000..9c82c7f7c --- /dev/null +++ b/scripts/boost/1.75.0/patch.diff @@ -0,0 +1,371 @@ +diff --git a/boost/property_tree/detail/ptree_implementation.hpp b/boost/property_tree/detail/ptree_implementation.hpp +index dd9fd37..71ce6b5 100644 +--- a/boost/property_tree/detail/ptree_implementation.hpp ++++ b/boost/property_tree/detail/ptree_implementation.hpp +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + + #if (defined(BOOST_MSVC) && \ +@@ -669,7 +670,8 @@ namespace boost { namespace property_tree + } + BOOST_PROPERTY_TREE_THROW(ptree_bad_data( + std::string("conversion of data to type \"") + +- typeid(Type).name() + "\" failed", data())); ++ boost::typeindex::type_id().pretty_name() + ++ "\" failed", data())); + } + + template +@@ -824,7 +826,8 @@ namespace boost { namespace property_tree + data() = *o; + } else { + BOOST_PROPERTY_TREE_THROW(ptree_bad_data( +- std::string("conversion of type \"") + typeid(Type).name() + ++ std::string("conversion of type \"") + ++ boost::typeindex::type_id().pretty_name() + + 
"\" to data failed", boost::any())); + } + } +diff --git a/boost/property_tree/detail/info_parser_read.hpp b/boost/property_tree/detail/info_parser_read.hpp +index 87ef2cd..c3446b4 100644 +--- a/boost/property_tree/detail/info_parser_read.hpp ++++ b/boost/property_tree/detail/info_parser_read.hpp +@@ -13,6 +13,8 @@ + #include "boost/property_tree/ptree.hpp" + #include "boost/property_tree/detail/info_parser_error.hpp" + #include "boost/property_tree/detail/info_parser_utils.hpp" ++#include "boost/core/ignore_unused.hpp" ++#include "boost/core/no_exceptions_support.hpp" + #include + #include + #include +@@ -210,7 +212,13 @@ namespace boost { namespace property_tree { namespace info_parser + std::stack stack; + stack.push(&pt); // Push root ptree on stack initially + +- try { ++ // When compiling without exception support there is no formal ++ // parameter "e" in the catch handler. Declaring a local variable ++ // here does not hurt and will be "used" to make the code in the ++ // handler compilable although the code will never be executed. 
++ info_parser_error e("", "", 0); ignore_unused(e); ++ ++ BOOST_TRY { + // While there are characters in the stream + while (stream.good()) { + // Read one line from stream +@@ -372,7 +380,7 @@ namespace boost { namespace property_tree { namespace info_parser + BOOST_PROPERTY_TREE_THROW(info_parser_error("unmatched {", "", 0)); + + } +- catch (info_parser_error &e) ++ BOOST_CATCH (info_parser_error &e) + { + // If line undefined rethrow error with correct filename and line + if (e.line() == 0) +@@ -383,6 +391,7 @@ namespace boost { namespace property_tree { namespace info_parser + BOOST_PROPERTY_TREE_THROW(e); + + } ++ BOOST_CATCH_END + + } + +diff --git a/boost/property_tree/detail/rapidxml.hpp b/boost/property_tree/detail/rapidxml.hpp +index 9e3d76a..e890feb 100644 +--- a/boost/property_tree/detail/rapidxml.hpp ++++ b/boost/property_tree/detail/rapidxml.hpp +@@ -28,7 +28,7 @@ + + #include // For std::exception + +-#define BOOST_PROPERTY_TREE_RAPIDXML_PARSE_ERROR(what, where) throw parse_error(what, where) ++#define BOOST_PROPERTY_TREE_RAPIDXML_PARSE_ERROR(what, where) boost::throw_exception(parse_error(what, where)) + + namespace boost { namespace property_tree { namespace detail {namespace rapidxml + { +diff --git a/boost/property_tree/detail/xml_parser_read_rapidxml.hpp b/boost/property_tree/detail/xml_parser_read_rapidxml.hpp +index 9c04219..a6b005a 100644 +--- a/boost/property_tree/detail/xml_parser_read_rapidxml.hpp ++++ b/boost/property_tree/detail/xml_parser_read_rapidxml.hpp +@@ -15,6 +15,8 @@ + #include + #include + #include ++#include ++#include + #include + + namespace boost { namespace property_tree { namespace xml_parser +@@ -101,7 +103,13 @@ namespace boost { namespace property_tree { namespace xml_parser + xml_parser_error("read error", filename, 0)); + v.push_back(0); // zero-terminate + +- try { ++ // When compiling without exception support there is no formal ++ // parameter "e" in the catch handler. 
Declaring a local variable ++ // here does not hurt and will be "used" to make the code in the ++ // handler compilable although the code will never be executed. ++ parse_error e(NULL, NULL); ignore_unused(e); ++ ++ BOOST_TRY { + // Parse using appropriate flags + const int f_tws = parse_normalize_whitespace + | parse_trim_whitespace; +@@ -131,12 +139,13 @@ namespace boost { namespace property_tree { namespace xml_parser + + // Swap local and result ptrees + pt.swap(local); +- } catch (parse_error &e) { ++ } BOOST_CATCH (parse_error &e) { + long line = static_cast( + std::count(&v.front(), e.where(), Ch('\n')) + 1); + BOOST_PROPERTY_TREE_THROW( + xml_parser_error(e.what(), filename, line)); + } ++ BOOST_CATCH_END + } + + } } } +diff --git a/boost/property_tree/info_parser.hpp b/boost/property_tree/info_parser.hpp +index 683ddad..abdc8a3 100644 +--- a/boost/property_tree/info_parser.hpp ++++ b/boost/property_tree/info_parser.hpp +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + + namespace boost { namespace property_tree { namespace info_parser +@@ -43,11 +44,12 @@ namespace boost { namespace property_tree { namespace info_parser + void read_info(std::basic_istream &stream, Ptree &pt, + const Ptree &default_ptree) + { +- try { ++ BOOST_TRY { + read_info(stream, pt); +- } catch(file_parser_error &) { ++ } BOOST_CATCH(file_parser_error &) { + pt = default_ptree; + } ++ BOOST_CATCH_END + } + + /** +@@ -87,11 +89,12 @@ namespace boost { namespace property_tree { namespace info_parser + const Ptree &default_ptree, + const std::locale &loc = std::locale()) + { +- try { ++ BOOST_TRY { + read_info(filename, pt, loc); +- } catch(file_parser_error &) { ++ } BOOST_CATCH(file_parser_error &) { + pt = default_ptree; + } ++ BOOST_CATCH_END + } + + /** +diff --git a/boost/property_tree/ini_parser.hpp b/boost/property_tree/ini_parser.hpp +index 50d3c97..5142dbf 100644 +--- a/boost/property_tree/ini_parser.hpp ++++ b/boost/property_tree/ini_parser.hpp +@@ 
-14,6 +14,8 @@ + #include + #include + #include ++#include ++#include + #include + #include + #include +@@ -165,13 +167,21 @@ namespace boost { namespace property_tree { namespace ini_parser + BOOST_PROPERTY_TREE_THROW(ini_parser_error( + "cannot open file", filename, 0)); + stream.imbue(loc); +- try { ++ ++ // When compiling without exception support there is no formal ++ // parameter "e" in the catch handler. Declaring a local variable ++ // here does not hurt and will be "used" to make the code in the ++ // handler compilable although the code will never be executed. ++ ini_parser_error e("", "", 0); ignore_unused(e); ++ ++ BOOST_TRY { + read_ini(stream, pt); + } +- catch (ini_parser_error &e) { ++ BOOST_CATCH (ini_parser_error &e) { + BOOST_PROPERTY_TREE_THROW(ini_parser_error( + e.message(), filename, e.line())); + } ++ BOOST_CATCH_END + } + + namespace detail +@@ -313,13 +323,21 @@ namespace boost { namespace property_tree { namespace ini_parser + BOOST_PROPERTY_TREE_THROW(ini_parser_error( + "cannot open file", filename, 0)); + stream.imbue(loc); +- try { ++ ++ // When compiling without exception support there is no formal ++ // parameter "e" in the catch handler. Declaring a local variable ++ // here does not hurt and will be "used" to make the code in the ++ // handler compilable although the code will never be executed. 
++ ini_parser_error e("", "", 0); ignore_unused(e); ++ ++ BOOST_TRY { + write_ini(stream, pt, flags); + } +- catch (ini_parser_error &e) { ++ BOOST_CATCH (ini_parser_error &e) { + BOOST_PROPERTY_TREE_THROW(ini_parser_error( + e.message(), filename, e.line())); + } ++ BOOST_CATCH_END + } + + } } } + +diff --git a/boost/property_tree/detail/info_parser_read.hpp b/boost/property_tree/detail/info_parser_read.hpp +index c3446b4..b46643a 100644 +--- a/boost/property_tree/detail/info_parser_read.hpp ++++ b/boost/property_tree/detail/info_parser_read.hpp +@@ -13,7 +13,6 @@ + #include "boost/property_tree/ptree.hpp" + #include "boost/property_tree/detail/info_parser_error.hpp" + #include "boost/property_tree/detail/info_parser_utils.hpp" +-#include "boost/core/ignore_unused.hpp" + #include "boost/core/no_exceptions_support.hpp" + #include + #include +@@ -212,12 +211,6 @@ namespace boost { namespace property_tree { namespace info_parser + std::stack stack; + stack.push(&pt); // Push root ptree on stack initially + +- // When compiling without exception support there is no formal +- // parameter "e" in the catch handler. Declaring a local variable +- // here does not hurt and will be "used" to make the code in the +- // handler compilable although the code will never be executed. 
+- info_parser_error e("", "", 0); ignore_unused(e); +- + BOOST_TRY { + // While there are characters in the stream + while (stream.good()) { +@@ -382,6 +375,7 @@ namespace boost { namespace property_tree { namespace info_parser + } + BOOST_CATCH (info_parser_error &e) + { ++ #ifndef BOOST_NO_EXCEPTIONS + // If line undefined rethrow error with correct filename and line + if (e.line() == 0) + { +@@ -389,7 +383,7 @@ namespace boost { namespace property_tree { namespace info_parser + } + else + BOOST_PROPERTY_TREE_THROW(e); +- ++ #endif + } + BOOST_CATCH_END + +diff --git a/boost/property_tree/detail/xml_parser_read_rapidxml.hpp b/boost/property_tree/detail/xml_parser_read_rapidxml.hpp +index a6b005a..b6f5820 100644 +--- a/boost/property_tree/detail/xml_parser_read_rapidxml.hpp ++++ b/boost/property_tree/detail/xml_parser_read_rapidxml.hpp +@@ -15,7 +15,6 @@ + #include + #include + #include +-#include + #include + #include + +@@ -103,12 +102,6 @@ namespace boost { namespace property_tree { namespace xml_parser + xml_parser_error("read error", filename, 0)); + v.push_back(0); // zero-terminate + +- // When compiling without exception support there is no formal +- // parameter "e" in the catch handler. Declaring a local variable +- // here does not hurt and will be "used" to make the code in the +- // handler compilable although the code will never be executed. 
+- parse_error e(NULL, NULL); ignore_unused(e); +- + BOOST_TRY { + // Parse using appropriate flags + const int f_tws = parse_normalize_whitespace +@@ -140,10 +133,12 @@ namespace boost { namespace property_tree { namespace xml_parser + // Swap local and result ptrees + pt.swap(local); + } BOOST_CATCH (parse_error &e) { ++ #ifndef BOOST_NO_EXCEPTIONS + long line = static_cast( + std::count(&v.front(), e.where(), Ch('\n')) + 1); + BOOST_PROPERTY_TREE_THROW( + xml_parser_error(e.what(), filename, line)); ++ #endif + } + BOOST_CATCH_END + } +diff --git a/boost/property_tree/ini_parser.hpp b/boost/property_tree/ini_parser.hpp +index 5142dbf..cb63fcc 100644 +--- a/boost/property_tree/ini_parser.hpp ++++ b/boost/property_tree/ini_parser.hpp +@@ -14,7 +14,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -168,18 +167,14 @@ namespace boost { namespace property_tree { namespace ini_parser + "cannot open file", filename, 0)); + stream.imbue(loc); + +- // When compiling without exception support there is no formal +- // parameter "e" in the catch handler. Declaring a local variable +- // here does not hurt and will be "used" to make the code in the +- // handler compilable although the code will never be executed. +- ini_parser_error e("", "", 0); ignore_unused(e); +- + BOOST_TRY { + read_ini(stream, pt); + } + BOOST_CATCH (ini_parser_error &e) { ++ #ifndef BOOST_NO_EXCEPTIONS + BOOST_PROPERTY_TREE_THROW(ini_parser_error( + e.message(), filename, e.line())); ++ #endif + } + BOOST_CATCH_END + } +@@ -324,18 +319,14 @@ namespace boost { namespace property_tree { namespace ini_parser + "cannot open file", filename, 0)); + stream.imbue(loc); + +- // When compiling without exception support there is no formal +- // parameter "e" in the catch handler. Declaring a local variable +- // here does not hurt and will be "used" to make the code in the +- // handler compilable although the code will never be executed. 
+- ini_parser_error e("", "", 0); ignore_unused(e); +- + BOOST_TRY { + write_ini(stream, pt, flags); + } + BOOST_CATCH (ini_parser_error &e) { ++ #ifndef BOOST_NO_EXCEPTIONS + BOOST_PROPERTY_TREE_THROW(ini_parser_error( + e.message(), filename, e.line())); ++ #endif + } + BOOST_CATCH_END + } diff --git a/scripts/boost/1.75.0/script.sh b/scripts/boost/1.75.0/script.sh new file mode 100755 index 000000000..0692f6b25 --- /dev/null +++ b/scripts/boost/1.75.0/script.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# inherit from boost base (used for all boost library packages) +source ${HERE}/base.sh + +# this package is the one that is header-only +MASON_NAME=boost +MASON_HEADER_ONLY=true + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${HERE}/common.sh + +# override default unpacking to just unpack headers +function mason_load_source { + mason_download \ + https://dl.bintray.com/boostorg/release/${MASON_VERSION}/source/boost_${BOOST_VERSION}.tar.bz2 \ + ${BOOST_SHASUM} + + mason_extract_tar_bz2 boost_${BOOST_VERSION}/boost + + MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION} +} + +# override default "compile" target for just the header install +function mason_compile { + patch -N -p1 < ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff + mkdir -p ${MASON_PREFIX}/include + cp -r ${MASON_ROOT}/.build/boost_${BOOST_VERSION}/boost ${MASON_PREFIX}/include +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/boost_libatomic/1.67.0/.travis.yml b/scripts/boost_libatomic/1.67.0/.travis.yml new file mode 100644 index 000000000..69bd2e783 --- /dev/null +++ b/scripts/boost_libatomic/1.67.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - 
ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libatomic/1.67.0/script.sh b/scripts/boost_libatomic/1.67.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libatomic/1.67.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libatomic/1.73.0/.travis.yml b/scripts/boost_libatomic/1.73.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libatomic/1.73.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libatomic/1.73.0/script.sh b/scripts/boost_libatomic/1.73.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libatomic/1.73.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libatomic/1.74.0/.travis.yml b/scripts/boost_libatomic/1.74.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libatomic/1.74.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libatomic/1.74.0/script.sh b/scripts/boost_libatomic/1.74.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libatomic/1.74.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libatomic/1.75.0/.travis.yml b/scripts/boost_libatomic/1.75.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libatomic/1.75.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libatomic/1.75.0/script.sh b/scripts/boost_libatomic/1.75.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libatomic/1.75.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libchrono/1.67.0/.travis.yml b/scripts/boost_libchrono/1.67.0/.travis.yml new file mode 100644 index 000000000..69bd2e783 --- /dev/null +++ b/scripts/boost_libchrono/1.67.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libchrono/1.67.0/script.sh b/scripts/boost_libchrono/1.67.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libchrono/1.67.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libchrono/1.73.0/.travis.yml b/scripts/boost_libchrono/1.73.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libchrono/1.73.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libchrono/1.73.0/script.sh b/scripts/boost_libchrono/1.73.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libchrono/1.73.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libchrono/1.74.0/.travis.yml b/scripts/boost_libchrono/1.74.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libchrono/1.74.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libchrono/1.74.0/script.sh b/scripts/boost_libchrono/1.74.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libchrono/1.74.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libchrono/1.75.0/.travis.yml b/scripts/boost_libchrono/1.75.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libchrono/1.75.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libchrono/1.75.0/script.sh b/scripts/boost_libchrono/1.75.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libchrono/1.75.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libdate_time/1.57.0/script.sh b/scripts/boost_libdate_time/1.57.0/script.sh index 1cb0750f3..c66932cf1 100755 --- a/scripts/boost_libdate_time/1.57.0/script.sh +++ b/scripts/boost_libdate_time/1.57.0/script.sh @@ -14,7 +14,7 @@ MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ 397306fa6d0858c4885fbba7d43a0164dcb7f53e export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION2} diff --git a/scripts/boost_libdate_time/1.67.0/.travis.yml b/scripts/boost_libdate_time/1.67.0/.travis.yml new file mode 100644 index 000000000..69bd2e783 --- /dev/null +++ b/scripts/boost_libdate_time/1.67.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libdate_time/1.67.0/script.sh b/scripts/boost_libdate_time/1.67.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libdate_time/1.67.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == 
"lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libdate_time/1.73.0/.travis.yml b/scripts/boost_libdate_time/1.73.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libdate_time/1.73.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libdate_time/1.73.0/script.sh b/scripts/boost_libdate_time/1.73.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libdate_time/1.73.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libdate_time/1.74.0/.travis.yml b/scripts/boost_libdate_time/1.74.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libdate_time/1.74.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libdate_time/1.74.0/script.sh b/scripts/boost_libdate_time/1.74.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libdate_time/1.74.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libdate_time/1.75.0/.travis.yml b/scripts/boost_libdate_time/1.75.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libdate_time/1.75.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libdate_time/1.75.0/script.sh b/scripts/boost_libdate_time/1.75.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libdate_time/1.75.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libfilesystem/1.57.0/script.sh b/scripts/boost_libfilesystem/1.57.0/script.sh index a0f7be7df..389061978 100755 --- a/scripts/boost_libfilesystem/1.57.0/script.sh +++ b/scripts/boost_libfilesystem/1.57.0/script.sh @@ -14,7 +14,7 @@ MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ 397306fa6d0858c4885fbba7d43a0164dcb7f53e export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION2} diff --git a/scripts/boost_libfilesystem/1.67.0/.travis.yml b/scripts/boost_libfilesystem/1.67.0/.travis.yml new file mode 100644 index 000000000..69bd2e783 --- /dev/null +++ b/scripts/boost_libfilesystem/1.67.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libfilesystem/1.67.0/script.sh b/scripts/boost_libfilesystem/1.67.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libfilesystem/1.67.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == 
"lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libfilesystem/1.72.0/.travis.yml b/scripts/boost_libfilesystem/1.72.0/.travis.yml new file mode 100644 index 000000000..574454da7 --- /dev/null +++ b/scripts/boost_libfilesystem/1.72.0/.travis.yml @@ -0,0 +1,18 @@ +language: generic + +matrix: + include: + - os: osx + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libfilesystem/1.72.0/script.sh b/scripts/boost_libfilesystem/1.72.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libfilesystem/1.72.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libfilesystem/1.73.0/.travis.yml b/scripts/boost_libfilesystem/1.73.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libfilesystem/1.73.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libfilesystem/1.73.0/script.sh b/scripts/boost_libfilesystem/1.73.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libfilesystem/1.73.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libfilesystem/1.74.0/.travis.yml b/scripts/boost_libfilesystem/1.74.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libfilesystem/1.74.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libfilesystem/1.74.0/script.sh b/scripts/boost_libfilesystem/1.74.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libfilesystem/1.74.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libfilesystem/1.75.0/.travis.yml b/scripts/boost_libfilesystem/1.75.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libfilesystem/1.75.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libfilesystem/1.75.0/script.sh b/scripts/boost_libfilesystem/1.75.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libfilesystem/1.75.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libiostreams/1.57.0/script.sh b/scripts/boost_libiostreams/1.57.0/script.sh index ecae1e915..894ad1bf6 100755 --- a/scripts/boost_libiostreams/1.57.0/script.sh +++ b/scripts/boost_libiostreams/1.57.0/script.sh @@ -14,7 +14,7 @@ MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ 397306fa6d0858c4885fbba7d43a0164dcb7f53e export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION2} diff --git a/scripts/boost_libiostreams/1.67.0/.travis.yml b/scripts/boost_libiostreams/1.67.0/.travis.yml new file mode 100644 index 000000000..69bd2e783 --- /dev/null +++ b/scripts/boost_libiostreams/1.67.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libiostreams/1.67.0/script.sh b/scripts/boost_libiostreams/1.67.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libiostreams/1.67.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == 
"lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libiostreams/1.73.0/.travis.yml b/scripts/boost_libiostreams/1.73.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libiostreams/1.73.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libiostreams/1.73.0/script.sh b/scripts/boost_libiostreams/1.73.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libiostreams/1.73.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libiostreams/1.74.0/.travis.yml b/scripts/boost_libiostreams/1.74.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libiostreams/1.74.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libiostreams/1.74.0/script.sh b/scripts/boost_libiostreams/1.74.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libiostreams/1.74.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libiostreams/1.75.0/.travis.yml b/scripts/boost_libiostreams/1.75.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libiostreams/1.75.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libiostreams/1.75.0/script.sh b/scripts/boost_libiostreams/1.75.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libiostreams/1.75.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libprogram_options/1.57.0/script.sh b/scripts/boost_libprogram_options/1.57.0/script.sh index ed6f39fe6..62af0466c 100755 --- a/scripts/boost_libprogram_options/1.57.0/script.sh +++ b/scripts/boost_libprogram_options/1.57.0/script.sh @@ -14,7 +14,7 @@ MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ 397306fa6d0858c4885fbba7d43a0164dcb7f53e export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION2} diff --git a/scripts/boost_libprogram_options/1.59.0/script.sh b/scripts/boost_libprogram_options/1.59.0/script.sh index ca8e225b9..10f18a6cc 100755 --- a/scripts/boost_libprogram_options/1.59.0/script.sh +++ b/scripts/boost_libprogram_options/1.59.0/script.sh @@ -14,7 +14,7 @@ MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ ff2e48f4d7e3c4b393d41e07a2f5d923b990967d export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION2} diff --git a/scripts/boost_libprogram_options/1.60.0/script.sh b/scripts/boost_libprogram_options/1.60.0/script.sh index 9f657ca90..1d9edd556 100755 --- a/scripts/boost_libprogram_options/1.60.0/script.sh +++ b/scripts/boost_libprogram_options/1.60.0/script.sh @@ -14,7 +14,7 @@ MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ + 
https://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ 40a65135d34c3e3a3cdbe681f06745c086e5b941 export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION2} diff --git a/scripts/boost_libprogram_options/1.62.0-cxx11abi/script.sh b/scripts/boost_libprogram_options/1.62.0-cxx11abi/script.sh index e11f76702..6a23d016e 100755 --- a/scripts/boost_libprogram_options/1.62.0-cxx11abi/script.sh +++ b/scripts/boost_libprogram_options/1.62.0-cxx11abi/script.sh @@ -31,7 +31,7 @@ export CXXFLAGS="${CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=1" function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION_DOWNLOAD}/boost_${BOOST_VERSION}.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION_DOWNLOAD}/boost_${BOOST_VERSION}.tar.bz2 \ ${BOOST_SHASUM} export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION} diff --git a/scripts/boost_libprogram_options/1.67.0/.travis.yml b/scripts/boost_libprogram_options/1.67.0/.travis.yml new file mode 100644 index 000000000..69bd2e783 --- /dev/null +++ b/scripts/boost_libprogram_options/1.67.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libprogram_options/1.67.0/script.sh b/scripts/boost_libprogram_options/1.67.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libprogram_options/1.67.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) 
+BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libprogram_options/1.73.0/.travis.yml b/scripts/boost_libprogram_options/1.73.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libprogram_options/1.73.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libprogram_options/1.73.0/script.sh b/scripts/boost_libprogram_options/1.73.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libprogram_options/1.73.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) 
+source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libprogram_options/1.74.0/.travis.yml b/scripts/boost_libprogram_options/1.74.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libprogram_options/1.74.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libprogram_options/1.74.0/script.sh b/scripts/boost_libprogram_options/1.74.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libprogram_options/1.74.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libprogram_options/1.75.0/.travis.yml b/scripts/boost_libprogram_options/1.75.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libprogram_options/1.75.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libprogram_options/1.75.0/script.sh b/scripts/boost_libprogram_options/1.75.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libprogram_options/1.75.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libpython/1.57.0/script.sh b/scripts/boost_libpython/1.57.0/script.sh index c75c8c679..1676f068b 100755 --- a/scripts/boost_libpython/1.57.0/script.sh +++ b/scripts/boost_libpython/1.57.0/script.sh @@ -14,7 +14,7 @@ MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ 397306fa6d0858c4885fbba7d43a0164dcb7f53e export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION2} diff --git a/scripts/boost_libpython/1.67.0/.travis.yml b/scripts/boost_libpython/1.67.0/.travis.yml new file mode 100644 index 000000000..69bd2e783 --- /dev/null +++ b/scripts/boost_libpython/1.67.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libpython/1.67.0/patch.diff b/scripts/boost_libpython/1.67.0/patch.diff new file mode 100644 index 000000000..f0fa2b156 --- /dev/null +++ b/scripts/boost_libpython/1.67.0/patch.diff @@ -0,0 +1,12 @@ +--- libs/python/src/converter/builtin_converters.cpp 2012-12-07 11:51:06.000000000 -0800 ++++ libs/python/src/converter/builtin_converters.cpp 2014-04-01 17:24:37.000000000 -0700 +@@ -32,7 +32,9 @@ + + void shared_ptr_deleter::operator()(void const*) + { ++ PyGILState_STATE gil = PyGILState_Ensure(); + owner.reset(); ++ PyGILState_Release(gil); + } + + namespace diff --git a/scripts/boost_libpython/1.67.0/script.sh 
b/scripts/boost_libpython/1.67.0/script.sh new file mode 100755 index 000000000..2bf1e1793 --- /dev/null +++ b/scripts/boost_libpython/1.67.0/script.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +PYTHON_VERSION="2.7" +PYTHON_VERSION_NO_DOT=${PYTHON_VERSION/.} +# NOTE: as of boost 1.67.0 it appears the static library has the python version embedded +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +function write_python_config() { +# usage: +# write_python_config +local PYTHON_VERSION=$2 +# note: apple pythons need '/System' +PYTHON_BASE=$3 +# note: python 3 uses 'm' +PYTHON_VARIANT=$4 +if [[ $(uname -s) == 'Darwin' ]]; then + echo " + using python + : ${PYTHON_VERSION} # version + : ${PYTHON_BASE}/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}/bin/python${PYTHON_VERSION}${PYTHON_VARIANT} # cmd-or-prefix + : ${PYTHON_BASE}/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}/include/python${PYTHON_VERSION}${PYTHON_VARIANT} # includes + : ${PYTHON_BASE}/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}/lib/python${PYTHON_VERSION}/config${PYTHON_VARIANT} # a lib actually symlink + : ${BOOST_TOOLSET} # condition + ; + " >> $1 +else + if [[ $(uname -s) == 'FreeBSD' ]]; then + echo " + using python + : ${PYTHON_VERSION} # version + : 
/usr/local/bin/python${PYTHON_VERSION}${PYTHON_VARIANT} # cmd-or-prefix + : /usr/local/include/python${PYTHON_VERSION} # includes + : /usr/local/lib/python${PYTHON_VERSION}/config${PYTHON_VARIANT} + : ${BOOST_TOOLSET} # condition + ; + " >> $1 + else + echo " + using python + : ${PYTHON_VERSION} # version + : /usr/bin/python${PYTHON_VERSION}${PYTHON_VARIANT} # cmd-or-prefix + : /usr/include/python${PYTHON_VERSION} # includes + : /usr/lib/python${PYTHON_VERSION}/config${PYTHON_VARIANT} + : ${BOOST_TOOLSET} # condition + ; + " >> $1 + fi +fi +} + +function mason_compile { + # patch to workaround crashes in python.input + # https://github.com/mapnik/mapnik/issues/1968 + mason_step "Loading patch ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff" + patch -N -p0 < ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff + write_python_config user-config.jam ${PYTHON_VERSION} "/System" "" + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! -f ./b2 ]] ; then + ./bootstrap.sh + fi + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -d0 \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + # NOTE: we strip the python version to make linking easier + mv stage/lib/libboost_${BOOST_LIBRARY}${PYTHON_VERSION_NO_DOT}.a ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/boost_libpython/1.73.0/.travis.yml b/scripts/boost_libpython/1.73.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libpython/1.73.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: 
+ - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libpython/1.73.0/patch.diff b/scripts/boost_libpython/1.73.0/patch.diff new file mode 100644 index 000000000..f0fa2b156 --- /dev/null +++ b/scripts/boost_libpython/1.73.0/patch.diff @@ -0,0 +1,12 @@ +--- libs/python/src/converter/builtin_converters.cpp 2012-12-07 11:51:06.000000000 -0800 ++++ libs/python/src/converter/builtin_converters.cpp 2014-04-01 17:24:37.000000000 -0700 +@@ -32,7 +32,9 @@ + + void shared_ptr_deleter::operator()(void const*) + { ++ PyGILState_STATE gil = PyGILState_Ensure(); + owner.reset(); ++ PyGILState_Release(gil); + } + + namespace diff --git a/scripts/boost_libpython/1.73.0/script.sh b/scripts/boost_libpython/1.73.0/script.sh new file mode 100755 index 000000000..2bf1e1793 --- /dev/null +++ b/scripts/boost_libpython/1.73.0/script.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +PYTHON_VERSION="2.7" +PYTHON_VERSION_NO_DOT=${PYTHON_VERSION/.} +# NOTE: as of boost 1.67.0 it appears the static library has the python version embedded +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +function write_python_config() { +# usage: +# write_python_config +local PYTHON_VERSION=$2 +# note: apple pythons need '/System' +PYTHON_BASE=$3 +# note: python 3 uses 'm' +PYTHON_VARIANT=$4 +if [[ $(uname -s) == 'Darwin' ]]; then + echo " + using python + : ${PYTHON_VERSION} # version + : ${PYTHON_BASE}/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}/bin/python${PYTHON_VERSION}${PYTHON_VARIANT} # cmd-or-prefix + : ${PYTHON_BASE}/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}/include/python${PYTHON_VERSION}${PYTHON_VARIANT} # includes + : ${PYTHON_BASE}/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}/lib/python${PYTHON_VERSION}/config${PYTHON_VARIANT} # a lib actually symlink + : ${BOOST_TOOLSET} # condition + ; + " >> $1 +else + if [[ $(uname -s) == 'FreeBSD' ]]; then + echo " + using python + : ${PYTHON_VERSION} # version + : /usr/local/bin/python${PYTHON_VERSION}${PYTHON_VARIANT} # cmd-or-prefix + : /usr/local/include/python${PYTHON_VERSION} # includes + : /usr/local/lib/python${PYTHON_VERSION}/config${PYTHON_VARIANT} + : ${BOOST_TOOLSET} # condition + ; + " >> $1 + else + echo " + using python + : ${PYTHON_VERSION} # version + : /usr/bin/python${PYTHON_VERSION}${PYTHON_VARIANT} # cmd-or-prefix + : /usr/include/python${PYTHON_VERSION} # includes + : /usr/lib/python${PYTHON_VERSION}/config${PYTHON_VARIANT} + : ${BOOST_TOOLSET} # condition + ; + " >> $1 + fi +fi +} + +function mason_compile { + # patch to workaround crashes in python.input + # https://github.com/mapnik/mapnik/issues/1968 + mason_step "Loading patch ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff" + patch -N -p0 < ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff + write_python_config user-config.jam ${PYTHON_VERSION} "/System" "" + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! 
-f ./b2 ]] ; then + ./bootstrap.sh + fi + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -d0 \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + # NOTE: we strip the python version to make linking easier + mv stage/lib/libboost_${BOOST_LIBRARY}${PYTHON_VERSION_NO_DOT}.a ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/boost_libpython/1.74.0/.travis.yml b/scripts/boost_libpython/1.74.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libpython/1.74.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libpython/1.74.0/patch.diff b/scripts/boost_libpython/1.74.0/patch.diff new file mode 100644 index 000000000..f0fa2b156 --- /dev/null +++ b/scripts/boost_libpython/1.74.0/patch.diff @@ -0,0 +1,12 @@ +--- libs/python/src/converter/builtin_converters.cpp 2012-12-07 11:51:06.000000000 -0800 ++++ libs/python/src/converter/builtin_converters.cpp 2014-04-01 17:24:37.000000000 -0700 +@@ -32,7 +32,9 @@ + + void shared_ptr_deleter::operator()(void const*) + { ++ PyGILState_STATE gil = PyGILState_Ensure(); + owner.reset(); ++ PyGILState_Release(gil); + } + + namespace diff --git a/scripts/boost_libpython/1.74.0/script.sh b/scripts/boost_libpython/1.74.0/script.sh new file mode 100755 index 000000000..2bf1e1793 --- /dev/null +++ b/scripts/boost_libpython/1.74.0/script.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash + +# 
dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +PYTHON_VERSION="2.7" +PYTHON_VERSION_NO_DOT=${PYTHON_VERSION/.} +# NOTE: as of boost 1.67.0 it appears the static library has the python version embedded +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +function write_python_config() { +# usage: +# write_python_config +local PYTHON_VERSION=$2 +# note: apple pythons need '/System' +PYTHON_BASE=$3 +# note: python 3 uses 'm' +PYTHON_VARIANT=$4 +if [[ $(uname -s) == 'Darwin' ]]; then + echo " + using python + : ${PYTHON_VERSION} # version + : ${PYTHON_BASE}/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}/bin/python${PYTHON_VERSION}${PYTHON_VARIANT} # cmd-or-prefix + : ${PYTHON_BASE}/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}/include/python${PYTHON_VERSION}${PYTHON_VARIANT} # includes + : ${PYTHON_BASE}/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}/lib/python${PYTHON_VERSION}/config${PYTHON_VARIANT} # a lib actually symlink + : ${BOOST_TOOLSET} # condition + ; + " >> $1 +else + if [[ $(uname -s) == 'FreeBSD' ]]; then + echo " + using python + : ${PYTHON_VERSION} # version + : /usr/local/bin/python${PYTHON_VERSION}${PYTHON_VARIANT} # cmd-or-prefix + : /usr/local/include/python${PYTHON_VERSION} # includes + : /usr/local/lib/python${PYTHON_VERSION}/config${PYTHON_VARIANT} + : ${BOOST_TOOLSET} # 
condition + ; + " >> $1 + else + echo " + using python + : ${PYTHON_VERSION} # version + : /usr/bin/python${PYTHON_VERSION}${PYTHON_VARIANT} # cmd-or-prefix + : /usr/include/python${PYTHON_VERSION} # includes + : /usr/lib/python${PYTHON_VERSION}/config${PYTHON_VARIANT} + : ${BOOST_TOOLSET} # condition + ; + " >> $1 + fi +fi +} + +function mason_compile { + # patch to workaround crashes in python.input + # https://github.com/mapnik/mapnik/issues/1968 + mason_step "Loading patch ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff" + patch -N -p0 < ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff + write_python_config user-config.jam ${PYTHON_VERSION} "/System" "" + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! -f ./b2 ]] ; then + ./bootstrap.sh + fi + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -d0 \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + # NOTE: we strip the python version to make linking easier + mv stage/lib/libboost_${BOOST_LIBRARY}${PYTHON_VERSION_NO_DOT}.a ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/boost_libpython/1.75.0/.travis.yml b/scripts/boost_libpython/1.75.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libpython/1.75.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libpython/1.75.0/patch.diff 
b/scripts/boost_libpython/1.75.0/patch.diff new file mode 100644 index 000000000..f0fa2b156 --- /dev/null +++ b/scripts/boost_libpython/1.75.0/patch.diff @@ -0,0 +1,12 @@ +--- libs/python/src/converter/builtin_converters.cpp 2012-12-07 11:51:06.000000000 -0800 ++++ libs/python/src/converter/builtin_converters.cpp 2014-04-01 17:24:37.000000000 -0700 +@@ -32,7 +32,9 @@ + + void shared_ptr_deleter::operator()(void const*) + { ++ PyGILState_STATE gil = PyGILState_Ensure(); + owner.reset(); ++ PyGILState_Release(gil); + } + + namespace diff --git a/scripts/boost_libpython/1.75.0/script.sh b/scripts/boost_libpython/1.75.0/script.sh new file mode 100755 index 000000000..2bf1e1793 --- /dev/null +++ b/scripts/boost_libpython/1.75.0/script.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +PYTHON_VERSION="2.7" +PYTHON_VERSION_NO_DOT=${PYTHON_VERSION/.} +# NOTE: as of boost 1.67.0 it appears the static library has the python version embedded +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +function write_python_config() { +# usage: +# write_python_config +local PYTHON_VERSION=$2 +# note: apple pythons need '/System' +PYTHON_BASE=$3 +# note: python 3 uses 'm' +PYTHON_VARIANT=$4 +if [[ $(uname -s) == 'Darwin' ]]; then + echo " + using python + : ${PYTHON_VERSION} # version + : ${PYTHON_BASE}/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}/bin/python${PYTHON_VERSION}${PYTHON_VARIANT} # cmd-or-prefix + : ${PYTHON_BASE}/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}/include/python${PYTHON_VERSION}${PYTHON_VARIANT} # includes + : ${PYTHON_BASE}/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}/lib/python${PYTHON_VERSION}/config${PYTHON_VARIANT} # a lib actually symlink + : ${BOOST_TOOLSET} # condition + ; + " >> $1 +else + if [[ $(uname -s) == 'FreeBSD' ]]; then + echo " + using python + : ${PYTHON_VERSION} # version + : /usr/local/bin/python${PYTHON_VERSION}${PYTHON_VARIANT} # cmd-or-prefix + : /usr/local/include/python${PYTHON_VERSION} # includes + : /usr/local/lib/python${PYTHON_VERSION}/config${PYTHON_VARIANT} + : ${BOOST_TOOLSET} # condition + ; + " >> $1 + else + echo " + using python + : ${PYTHON_VERSION} # version + : /usr/bin/python${PYTHON_VERSION}${PYTHON_VARIANT} # cmd-or-prefix + : /usr/include/python${PYTHON_VERSION} # includes + : /usr/lib/python${PYTHON_VERSION}/config${PYTHON_VARIANT} + : ${BOOST_TOOLSET} # condition + ; + " >> $1 + fi +fi +} + +function mason_compile { + # patch to workaround crashes in python.input + # https://github.com/mapnik/mapnik/issues/1968 + mason_step "Loading patch ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff" + patch -N -p0 < ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff + write_python_config user-config.jam ${PYTHON_VERSION} "/System" "" + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! 
-f ./b2 ]] ; then + ./bootstrap.sh + fi + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -d0 \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + # NOTE: we strip the python version to make linking easier + mv stage/lib/libboost_${BOOST_LIBRARY}${PYTHON_VERSION_NO_DOT}.a ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/boost_libregex/1.57.0/script.sh b/scripts/boost_libregex/1.57.0/script.sh index bcab4a16d..eec911b0c 100755 --- a/scripts/boost_libregex/1.57.0/script.sh +++ b/scripts/boost_libregex/1.57.0/script.sh @@ -14,7 +14,7 @@ MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ 397306fa6d0858c4885fbba7d43a0164dcb7f53e export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION2} diff --git a/scripts/boost_libregex/1.67.0/.travis.yml b/scripts/boost_libregex/1.67.0/.travis.yml new file mode 100644 index 000000000..69bd2e783 --- /dev/null +++ b/scripts/boost_libregex/1.67.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libregex/1.67.0/script.sh b/scripts/boost_libregex/1.67.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ 
b/scripts/boost_libregex/1.67.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libregex/1.73.0/.travis.yml b/scripts/boost_libregex/1.73.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libregex/1.73.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libregex/1.73.0/script.sh b/scripts/boost_libregex/1.73.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libregex/1.73.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ 
${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libregex/1.74.0/.travis.yml b/scripts/boost_libregex/1.74.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libregex/1.74.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libregex/1.74.0/script.sh b/scripts/boost_libregex/1.74.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libregex/1.74.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libregex/1.75.0/.travis.yml b/scripts/boost_libregex/1.75.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libregex/1.75.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libregex/1.75.0/script.sh b/scripts/boost_libregex/1.75.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libregex/1.75.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libregex_icu/1.67.0/.travis.yml b/scripts/boost_libregex_icu/1.67.0/.travis.yml new file mode 100644 index 000000000..69bd2e783 --- /dev/null +++ b/scripts/boost_libregex_icu/1.67.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libregex_icu/1.67.0/script.sh b/scripts/boost_libregex_icu/1.67.0/script.sh new file mode 100755 index 000000000..de94e5385 --- /dev/null +++ b/scripts/boost_libregex_icu/1.67.0/script.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +# Note: cannot deduce from directory since it is named in a custom way +#BOOST_LIBRARY=${THIS_DIR#boost_lib} +BOOST_LIBRARY=regex +MASON_NAME=boost_lib${BOOST_LIBRARY}_icu +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +function mason_prepare_compile { + ${MASON_DIR}/mason install icu 55.1 + MASON_ICU=$(${MASON_DIR}/mason prefix icu 55.1) +} + +# custom compile that gets icu working +function mason_compile { + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! -f ./b2 ]] ; then + ./bootstrap.sh + fi + echo 'int main() { return 0; }' > libs/regex/build/has_icu_test.cpp + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -sHAVE_ICU=1 -sICU_PATH=${MASON_ICU} --reconfigure --debug-configuration \ + -d0 \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/boost_libregex_icu/1.73.0/.travis.yml b/scripts/boost_libregex_icu/1.73.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libregex_icu/1.73.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libregex_icu/1.73.0/script.sh b/scripts/boost_libregex_icu/1.73.0/script.sh new file mode 100755 index 000000000..de94e5385 --- /dev/null +++ b/scripts/boost_libregex_icu/1.73.0/script.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename 
$(dirname $HERE)) +# Note: cannot deduce from directory since it is named in a custom way +#BOOST_LIBRARY=${THIS_DIR#boost_lib} +BOOST_LIBRARY=regex +MASON_NAME=boost_lib${BOOST_LIBRARY}_icu +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +function mason_prepare_compile { + ${MASON_DIR}/mason install icu 55.1 + MASON_ICU=$(${MASON_DIR}/mason prefix icu 55.1) +} + +# custom compile that gets icu working +function mason_compile { + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! -f ./b2 ]] ; then + ./bootstrap.sh + fi + echo 'int main() { return 0; }' > libs/regex/build/has_icu_test.cpp + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -sHAVE_ICU=1 -sICU_PATH=${MASON_ICU} --reconfigure --debug-configuration \ + -d0 \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/boost_libregex_icu/1.74.0/.travis.yml b/scripts/boost_libregex_icu/1.74.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libregex_icu/1.74.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - 
libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libregex_icu/1.74.0/script.sh b/scripts/boost_libregex_icu/1.74.0/script.sh new file mode 100755 index 000000000..de94e5385 --- /dev/null +++ b/scripts/boost_libregex_icu/1.74.0/script.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +# Note: cannot deduce from directory since it is named in a custom way +#BOOST_LIBRARY=${THIS_DIR#boost_lib} +BOOST_LIBRARY=regex +MASON_NAME=boost_lib${BOOST_LIBRARY}_icu +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +function mason_prepare_compile { + ${MASON_DIR}/mason install icu 55.1 + MASON_ICU=$(${MASON_DIR}/mason prefix icu 55.1) +} + +# custom compile that gets icu working +function mason_compile { + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! 
-f ./b2 ]] ; then + ./bootstrap.sh + fi + echo 'int main() { return 0; }' > libs/regex/build/has_icu_test.cpp + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -sHAVE_ICU=1 -sICU_PATH=${MASON_ICU} --reconfigure --debug-configuration \ + -d0 \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/boost_libregex_icu/1.75.0/.travis.yml b/scripts/boost_libregex_icu/1.75.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libregex_icu/1.75.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libregex_icu/1.75.0/script.sh b/scripts/boost_libregex_icu/1.75.0/script.sh new file mode 100755 index 000000000..de94e5385 --- /dev/null +++ b/scripts/boost_libregex_icu/1.75.0/script.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +# Note: cannot deduce from directory since it is named in a custom way +#BOOST_LIBRARY=${THIS_DIR#boost_lib} +BOOST_LIBRARY=regex +MASON_NAME=boost_lib${BOOST_LIBRARY}_icu +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + 
MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +function mason_prepare_compile { + ${MASON_DIR}/mason install icu 55.1 + MASON_ICU=$(${MASON_DIR}/mason prefix icu 55.1) +} + +# custom compile that gets icu working +function mason_compile { + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! -f ./b2 ]] ; then + ./bootstrap.sh + fi + echo 'int main() { return 0; }' > libs/regex/build/has_icu_test.cpp + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -sHAVE_ICU=1 -sICU_PATH=${MASON_ICU} --reconfigure --debug-configuration \ + -d0 \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/boost_libregex_icu57/1.67.0/.travis.yml b/scripts/boost_libregex_icu57/1.67.0/.travis.yml new file mode 100644 index 000000000..a0b3857c0 --- /dev/null +++ b/scripts/boost_libregex_icu57/1.67.0/.travis.yml @@ -0,0 +1,18 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libregex_icu57/1.67.0/script.sh b/scripts/boost_libregex_icu57/1.67.0/script.sh new file mode 100755 index 000000000..1f9d09957 --- /dev/null +++ b/scripts/boost_libregex_icu57/1.67.0/script.sh @@ -0,0 
+1,58 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +# Note: cannot deduce from directory since it is named in a custom way +#BOOST_LIBRARY=${THIS_DIR#boost_lib} +BOOST_LIBRARY=regex +MASON_NAME=boost_lib${BOOST_LIBRARY}_icu57 +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +function mason_prepare_compile { + ${MASON_DIR}/mason install icu 57.1 + MASON_ICU=$(${MASON_DIR}/mason prefix icu 57.1) +} + +# custom compile that gets icu working +function mason_compile { + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! 
-f ./b2 ]] ; then + ./bootstrap.sh + fi + echo 'int main() { return 0; }' > libs/regex/build/has_icu_test.cpp + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -sHAVE_ICU=1 -sICU_PATH=${MASON_ICU} --reconfigure --debug-configuration \ + -d0 -a \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="-fvisibility=hidden ${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/boost_libregex_icu57/1.73.0/.travis.yml b/scripts/boost_libregex_icu57/1.73.0/.travis.yml new file mode 100644 index 000000000..48091fd3f --- /dev/null +++ b/scripts/boost_libregex_icu57/1.73.0/.travis.yml @@ -0,0 +1,18 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libregex_icu57/1.73.0/script.sh b/scripts/boost_libregex_icu57/1.73.0/script.sh new file mode 100755 index 000000000..1f9d09957 --- /dev/null +++ b/scripts/boost_libregex_icu57/1.73.0/script.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +# Note: cannot deduce from directory since it is named in a custom way +#BOOST_LIBRARY=${THIS_DIR#boost_lib} +BOOST_LIBRARY=regex +MASON_NAME=boost_lib${BOOST_LIBRARY}_icu57 +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" 
]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +function mason_prepare_compile { + ${MASON_DIR}/mason install icu 57.1 + MASON_ICU=$(${MASON_DIR}/mason prefix icu 57.1) +} + +# custom compile that gets icu working +function mason_compile { + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! -f ./b2 ]] ; then + ./bootstrap.sh + fi + echo 'int main() { return 0; }' > libs/regex/build/has_icu_test.cpp + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -sHAVE_ICU=1 -sICU_PATH=${MASON_ICU} --reconfigure --debug-configuration \ + -d0 -a \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="-fvisibility=hidden ${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/boost_libregex_icu57/1.74.0/.travis.yml b/scripts/boost_libregex_icu57/1.74.0/.travis.yml new file mode 100644 index 000000000..48091fd3f --- /dev/null +++ b/scripts/boost_libregex_icu57/1.74.0/.travis.yml @@ -0,0 +1,18 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libregex_icu57/1.74.0/script.sh b/scripts/boost_libregex_icu57/1.74.0/script.sh new file mode 100755 index 000000000..1f9d09957 --- /dev/null +++ 
b/scripts/boost_libregex_icu57/1.74.0/script.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +# Note: cannot deduce from directory since it is named in a custom way +#BOOST_LIBRARY=${THIS_DIR#boost_lib} +BOOST_LIBRARY=regex +MASON_NAME=boost_lib${BOOST_LIBRARY}_icu57 +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +function mason_prepare_compile { + ${MASON_DIR}/mason install icu 57.1 + MASON_ICU=$(${MASON_DIR}/mason prefix icu 57.1) +} + +# custom compile that gets icu working +function mason_compile { + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! 
-f ./b2 ]] ; then + ./bootstrap.sh + fi + echo 'int main() { return 0; }' > libs/regex/build/has_icu_test.cpp + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -sHAVE_ICU=1 -sICU_PATH=${MASON_ICU} --reconfigure --debug-configuration \ + -d0 -a \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="-fvisibility=hidden ${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/boost_libregex_icu57/1.75.0/.travis.yml b/scripts/boost_libregex_icu57/1.75.0/.travis.yml new file mode 100644 index 000000000..48091fd3f --- /dev/null +++ b/scripts/boost_libregex_icu57/1.75.0/.travis.yml @@ -0,0 +1,18 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libregex_icu57/1.75.0/script.sh b/scripts/boost_libregex_icu57/1.75.0/script.sh new file mode 100755 index 000000000..1f9d09957 --- /dev/null +++ b/scripts/boost_libregex_icu57/1.75.0/script.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +# Note: cannot deduce from directory since it is named in a custom way +#BOOST_LIBRARY=${THIS_DIR#boost_lib} +BOOST_LIBRARY=regex +MASON_NAME=boost_lib${BOOST_LIBRARY}_icu57 +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" 
]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +function mason_prepare_compile { + ${MASON_DIR}/mason install icu 57.1 + MASON_ICU=$(${MASON_DIR}/mason prefix icu 57.1) +} + +# custom compile that gets icu working +function mason_compile { + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! -f ./b2 ]] ; then + ./bootstrap.sh + fi + echo 'int main() { return 0; }' > libs/regex/build/has_icu_test.cpp + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -sHAVE_ICU=1 -sICU_PATH=${MASON_ICU} --reconfigure --debug-configuration \ + -d0 -a \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="-fvisibility=hidden ${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/boost_libregex_icu58/1.66.0/.travis.yml b/scripts/boost_libregex_icu58/1.66.0/.travis.yml new file mode 100644 index 000000000..a0b3857c0 --- /dev/null +++ b/scripts/boost_libregex_icu58/1.66.0/.travis.yml @@ -0,0 +1,18 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libregex_icu58/1.66.0/script.sh b/scripts/boost_libregex_icu58/1.66.0/script.sh new file mode 100755 index 000000000..fe538d33c --- /dev/null +++ 
b/scripts/boost_libregex_icu58/1.66.0/script.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +# Note: cannot deduce from directory since it is named in a custom way +#BOOST_LIBRARY=${THIS_DIR#boost_lib} +BOOST_LIBRARY=regex +MASON_NAME=boost_lib${BOOST_LIBRARY}_icu58 +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +function mason_prepare_compile { + ${MASON_DIR}/mason install icu 58.1 + MASON_ICU=$(${MASON_DIR}/mason prefix icu 58.1) +} + +# custom compile that gets icu working +function mason_compile { + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! 
-f ./b2 ]] ; then + ./bootstrap.sh + fi + echo 'int main() { return 0; }' > libs/regex/build/has_icu_test.cpp + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -sHAVE_ICU=1 -sICU_PATH=${MASON_ICU} --reconfigure --debug-configuration \ + -d0 -a \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="-fvisibility=hidden ${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/boost_libregex_icu58/1.74.0/.travis.yml b/scripts/boost_libregex_icu58/1.74.0/.travis.yml new file mode 100644 index 000000000..d67dc67b7 --- /dev/null +++ b/scripts/boost_libregex_icu58/1.74.0/.travis.yml @@ -0,0 +1,18 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-6-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libregex_icu58/1.74.0/script.sh b/scripts/boost_libregex_icu58/1.74.0/script.sh new file mode 100755 index 000000000..fe538d33c --- /dev/null +++ b/scripts/boost_libregex_icu58/1.74.0/script.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +# Note: cannot deduce from directory since it is named in a custom way +#BOOST_LIBRARY=${THIS_DIR#boost_lib} +BOOST_LIBRARY=regex +MASON_NAME=boost_lib${BOOST_LIBRARY}_icu58 +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" 
]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +function mason_prepare_compile { + ${MASON_DIR}/mason install icu 58.1 + MASON_ICU=$(${MASON_DIR}/mason prefix icu 58.1) +} + +# custom compile that gets icu working +function mason_compile { + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! -f ./b2 ]] ; then + ./bootstrap.sh + fi + echo 'int main() { return 0; }' > libs/regex/build/has_icu_test.cpp + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -sHAVE_ICU=1 -sICU_PATH=${MASON_ICU} --reconfigure --debug-configuration \ + -d0 -a \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="-fvisibility=hidden ${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/boost_libregex_icu58/1.75.0/.travis.yml b/scripts/boost_libregex_icu58/1.75.0/.travis.yml new file mode 100644 index 000000000..d67dc67b7 --- /dev/null +++ b/scripts/boost_libregex_icu58/1.75.0/.travis.yml @@ -0,0 +1,18 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-6-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libregex_icu58/1.75.0/script.sh b/scripts/boost_libregex_icu58/1.75.0/script.sh new file mode 100755 index 000000000..fe538d33c --- /dev/null +++ 
b/scripts/boost_libregex_icu58/1.75.0/script.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +# Note: cannot deduce from directory since it is named in a custom way +#BOOST_LIBRARY=${THIS_DIR#boost_lib} +BOOST_LIBRARY=regex +MASON_NAME=boost_lib${BOOST_LIBRARY}_icu58 +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +function mason_prepare_compile { + ${MASON_DIR}/mason install icu 58.1 + MASON_ICU=$(${MASON_DIR}/mason prefix icu 58.1) +} + +# custom compile that gets icu working +function mason_compile { + gen_config ${BOOST_TOOLSET} ${BOOST_TOOLSET_CXX} + if [[ ! 
-f ./b2 ]] ; then + ./bootstrap.sh + fi + echo 'int main() { return 0; }' > libs/regex/build/has_icu_test.cpp + ./b2 \ + --with-${BOOST_LIBRARY} \ + --prefix=${MASON_PREFIX} \ + -j${MASON_CONCURRENCY} \ + -sHAVE_ICU=1 -sICU_PATH=${MASON_ICU} --reconfigure --debug-configuration \ + -d0 -a \ + --ignore-site-config --user-config=user-config.jam \ + architecture="${BOOST_ARCH}" \ + toolset="${BOOST_TOOLSET}" \ + link=static \ + variant=release \ + linkflags="${LDFLAGS:-" "}" \ + cxxflags="-fvisibility=hidden ${CXXFLAGS:-" "}" \ + stage + mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE}) + mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +mason_run "$@" diff --git a/scripts/boost_libsystem/1.57.0/script.sh b/scripts/boost_libsystem/1.57.0/script.sh index 5a1f92447..71f80fa75 100755 --- a/scripts/boost_libsystem/1.57.0/script.sh +++ b/scripts/boost_libsystem/1.57.0/script.sh @@ -14,7 +14,7 @@ MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ 397306fa6d0858c4885fbba7d43a0164dcb7f53e export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION2} diff --git a/scripts/boost_libsystem/1.67.0/.travis.yml b/scripts/boost_libsystem/1.67.0/.travis.yml new file mode 100644 index 000000000..69bd2e783 --- /dev/null +++ b/scripts/boost_libsystem/1.67.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libsystem/1.67.0/script.sh b/scripts/boost_libsystem/1.67.0/script.sh new file mode 
100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libsystem/1.67.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libsystem/1.72.0/.travis.yml b/scripts/boost_libsystem/1.72.0/.travis.yml new file mode 100644 index 000000000..574454da7 --- /dev/null +++ b/scripts/boost_libsystem/1.72.0/.travis.yml @@ -0,0 +1,18 @@ +language: generic + +matrix: + include: + - os: osx + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libsystem/1.72.0/script.sh b/scripts/boost_libsystem/1.72.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libsystem/1.72.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack 
for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libsystem/1.73.0/.travis.yml b/scripts/boost_libsystem/1.73.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libsystem/1.73.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libsystem/1.73.0/script.sh b/scripts/boost_libsystem/1.73.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libsystem/1.73.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libsystem/1.74.0/.travis.yml b/scripts/boost_libsystem/1.74.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libsystem/1.74.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libsystem/1.74.0/script.sh b/scripts/boost_libsystem/1.74.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libsystem/1.74.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libsystem/1.75.0/.travis.yml b/scripts/boost_libsystem/1.75.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libsystem/1.75.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libsystem/1.75.0/script.sh b/scripts/boost_libsystem/1.75.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libsystem/1.75.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libtest/1.57.0/script.sh b/scripts/boost_libtest/1.57.0/script.sh index 57104ca44..5d167675f 100755 --- a/scripts/boost_libtest/1.57.0/script.sh +++ b/scripts/boost_libtest/1.57.0/script.sh @@ -14,7 +14,7 @@ MASON_LIB_FILE=lib/libboost_unit_test_framework.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ 397306fa6d0858c4885fbba7d43a0164dcb7f53e export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION2} diff --git a/scripts/boost_libtest/1.67.0/.travis.yml b/scripts/boost_libtest/1.67.0/.travis.yml new file mode 100644 index 000000000..69bd2e783 --- /dev/null +++ b/scripts/boost_libtest/1.67.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libtest/1.67.0/script.sh b/scripts/boost_libtest/1.67.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libtest/1.67.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + 
MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libtest/1.73.0/.travis.yml b/scripts/boost_libtest/1.73.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libtest/1.73.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libtest/1.73.0/script.sh b/scripts/boost_libtest/1.73.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libtest/1.73.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libtest/1.74.0/.travis.yml b/scripts/boost_libtest/1.74.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libtest/1.74.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libtest/1.74.0/script.sh b/scripts/boost_libtest/1.74.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libtest/1.74.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libtest/1.75.0/.travis.yml b/scripts/boost_libtest/1.75.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libtest/1.75.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libtest/1.75.0/script.sh b/scripts/boost_libtest/1.75.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libtest/1.75.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libthread/1.57.0/script.sh b/scripts/boost_libthread/1.57.0/script.sh index 318accde5..534a1377d 100755 --- a/scripts/boost_libthread/1.57.0/script.sh +++ b/scripts/boost_libthread/1.57.0/script.sh @@ -14,7 +14,7 @@ MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ + https://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \ 397306fa6d0858c4885fbba7d43a0164dcb7f53e export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION2} diff --git a/scripts/boost_libthread/1.67.0/.travis.yml b/scripts/boost_libthread/1.67.0/.travis.yml new file mode 100644 index 000000000..69bd2e783 --- /dev/null +++ b/scripts/boost_libthread/1.67.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libthread/1.67.0/script.sh b/scripts/boost_libthread/1.67.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libthread/1.67.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + 
MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. ${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libthread/1.73.0/.travis.yml b/scripts/boost_libthread/1.73.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libthread/1.73.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libthread/1.73.0/script.sh b/scripts/boost_libthread/1.73.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libthread/1.73.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libthread/1.74.0/.travis.yml b/scripts/boost_libthread/1.74.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libthread/1.74.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libthread/1.74.0/script.sh b/scripts/boost_libthread/1.74.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libthread/1.74.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/boost_libthread/1.75.0/.travis.yml b/scripts/boost_libthread/1.75.0/.travis.yml new file mode 100644 index 000000000..c343fec4a --- /dev/null +++ b/scripts/boost_libthread/1.75.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/boost_libthread/1.75.0/script.sh b/scripts/boost_libthread/1.75.0/script.sh new file mode 100755 index 000000000..24b9c6418 --- /dev/null +++ b/scripts/boost_libthread/1.75.0/script.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# key properties unique to this library +THIS_DIR=$(basename $(dirname $HERE)) +BOOST_LIBRARY=${THIS_DIR#boost_lib} +MASON_NAME=boost_lib${BOOST_LIBRARY} +MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a +# hack for inconsistently named test lib +if [[ ${MASON_LIB_FILE} == "lib/libboost_test.a" ]]; then + MASON_LIB_FILE=lib/libboost_unit_test_framework.a +fi + +# inherit from boost base (used for all boost library packages) +BASE_PATH=${HERE}/../../boost/$(basename $HERE) +source ${BASE_PATH}/base.sh + +# setup mason env +. 
${MASON_DIR}/mason.sh + +# source common build functions +source ${BASE_PATH}/common.sh + +mason_run "$@" diff --git a/scripts/build2/0.8.0/.travis.yml b/scripts/build2/0.8.0/.travis.yml new file mode 100644 index 000000000..ba43c2c92 --- /dev/null +++ b/scripts/build2/0.8.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.3 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/build2/0.8.0/patch.diff b/scripts/build2/0.8.0/patch.diff new file mode 100644 index 000000000..853054575 --- /dev/null +++ b/scripts/build2/0.8.0/patch.diff @@ -0,0 +1,41 @@ +diff --git a/build.sh b/build.sh +index a0c4f55..eac0c77 100755 +--- a/build.sh ++++ b/build.sh +@@ -260,7 +260,7 @@ run build2/b-boot --version + + # Bootstrap, stage 2. + # +-run build2/b-boot $verbose config.cxx="$cxx" config.bin.lib=static build2/exe{b} ++run build2/b-boot $verbose config.cxx="$cxx" config.cc.coptions="$*" config.bin.lib=static config.bin.exe.lib=static config.cxx.loptions="${LDFLAGS}" build2/exe{b} + mv build2/b build2/b-boot + run build2/b-boot --version + +@@ -269,7 +269,7 @@ run build2/b-boot --version + run cd .. + + run build2/build2/b-boot $verbose configure \ +-config.cxx="$cxx" \ ++config.cxx="$cxx" config.cc.coptions="$*" config.bin.lib=static config.bin.exe.lib=static config.cxx.loptions="${LDFLAGS}" \ + config.bin.suffix=-stage \ + config.bin.rpath="$conf_rpath" \ + config.install.root="$idir" \ +@@ -293,7 +293,7 @@ cdir="$(pwd)" # Save full path for later. 
+ + run bpkg-stage $verbose create \ + cc \ +-config.cxx="$cxx" \ ++config.cxx="$cxx" config.bin.lib=static config.bin.exe.lib=static config.cxx.loptions="${LDFLAGS}" \ + config.cc.coptions="$*" \ + config.bin.rpath="$conf_rpath" \ + config.install.root="$idir" \ +diff --git a/build2/bootstrap.sh b/build2/bootstrap.sh +index e6088c2..541b3be 100755 +--- a/build2/bootstrap.sh ++++ b/build2/bootstrap.sh +@@ -136,4 +136,4 @@ src="$src $libbutl/libbutl/*.cxx" + # mode since 4.9 doesn't recognize c++1z. + # + set -x +-"$cxx" "-I$libbutl" -I. -DBUILD2_BOOTSTRAP '-DBUILD2_HOST_TRIPLET="'"$host"'"' -std=c++1y "$@" -o build2/b-boot $src -lpthread ++"$cxx" "-I$libbutl" -I. -DBUILD2_BOOTSTRAP '-DBUILD2_HOST_TRIPLET="'"$host"'"' -std=c++1y "$@" -o build2/b-boot $src -lpthread -stdlib=libc++ ${LDFLAGS} diff --git a/scripts/build2/0.8.0/script.sh b/scripts/build2/0.8.0/script.sh new file mode 100755 index 000000000..e96b92c46 --- /dev/null +++ b/scripts/build2/0.8.0/script.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +MASON_NAME=build2 +MASON_VERSION=0.8.0 +MASON_LIB_FILE=bin/bpkg + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://download.build2.org/${MASON_VERSION}/build2-toolchain-${MASON_VERSION}.tar.gz \ + 4ddfaa4f763ea7d99da4a9002f9714715e838e56 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/build2-toolchain-${MASON_VERSION} +} + +function mason_compile { + # NOTE: build2 requires a c++17 capable compiler and it uses CXX11_ABI features in libstdc++ (so it must be built with _GLIBCXX_USE_CXX11_ABI=1) + # Since we want the binaries to be portable to pre cxx11 abi machines, we statically link against libc++ instead of linking against libstdc++ + # note with clang 4.x will hit "header 'shared_mutex' not found and cannot be generated" because c++17 support is lacking + LDFLAGS="" + if [[ $(uname -s) == 'Linux' ]]; then + LDFLAGS="-Wl,--start-group -lc++ -lc++abi -pthread -lrt" + fi + patch -N -p1 < ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff + ./build.sh --install-dir ${MASON_PREFIX} --verbose 3 --sudo "" --trust yes ${CXX:-clang++} -O3 -stdlib=libc++ ${LDFLAGS} +} + +function mason_cflags { + : +} + +function mason_static_libs { + : +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/bzip2/1.0.6/script.sh b/scripts/bzip2/1.0.6/script.sh index 8155a1ec1..b9c2982b7 100755 --- a/scripts/bzip2/1.0.6/script.sh +++ b/scripts/bzip2/1.0.6/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libbz2.a function mason_load_source { mason_download \ - http://www.bzip.org/1.0.6/bzip2-1.0.6.tar.gz \ + https://src.fedoraproject.org/repo/pkgs/bzip2/bzip2-1.0.6.tar.gz/00b516f4704d4a7cb50a1d97e6e8e15b/bzip2-1.0.6.tar.gz \ e47e9034c4116f467618cfaaa4d3aca004094007 mason_extract_tar_gz diff --git a/scripts/cairo/1.12.18/script.sh b/scripts/cairo/1.12.18/script.sh index 387180f7b..e500465d9 100755 --- a/scripts/cairo/1.12.18/script.sh +++ b/scripts/cairo/1.12.18/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/cairo.pc function mason_load_source { 
mason_download \ - http://cairographics.org/releases/${MASON_NAME}-${MASON_VERSION}.tar.xz \ + https://cairographics.org/releases/${MASON_NAME}-${MASON_VERSION}.tar.xz \ 34e29ec00864859cc26ac3e45a02d7b2cb65d1c8 mason_extract_tar_xz diff --git a/scripts/cairo/1.14.0/script.sh b/scripts/cairo/1.14.0/script.sh index 451bf655c..09763cf4b 100755 --- a/scripts/cairo/1.14.0/script.sh +++ b/scripts/cairo/1.14.0/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/cairo.pc function mason_load_source { mason_download \ - http://cairographics.org/releases/${MASON_NAME}-${MASON_VERSION}.tar.xz \ + https://cairographics.org/releases/${MASON_NAME}-${MASON_VERSION}.tar.xz \ 69b3923f8f113206f6c0e2972de4469d04b04592 mason_extract_tar_xz diff --git a/scripts/cairo/1.14.2/script.sh b/scripts/cairo/1.14.2/script.sh index 285fbbb83..f16aa48ff 100755 --- a/scripts/cairo/1.14.2/script.sh +++ b/scripts/cairo/1.14.2/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/cairo.pc function mason_load_source { mason_download \ - http://cairographics.org/releases/${MASON_NAME}-${MASON_VERSION}.tar.xz \ + https://cairographics.org/releases/${MASON_NAME}-${MASON_VERSION}.tar.xz \ 3202106739cb0cb044c910a9b67769c95d0b6bce mason_extract_tar_xz diff --git a/scripts/cairo/1.14.4/script.sh b/scripts/cairo/1.14.4/script.sh index b36da2cc3..4a08a2edb 100755 --- a/scripts/cairo/1.14.4/script.sh +++ b/scripts/cairo/1.14.4/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/cairo.pc function mason_load_source { mason_download \ - http://cairographics.org/releases/${MASON_NAME}-${MASON_VERSION}.tar.xz \ + https://cairographics.org/releases/${MASON_NAME}-${MASON_VERSION}.tar.xz \ ecf18db1e89d99799783757d9026a74012dfafcb mason_extract_tar_xz diff --git a/scripts/cairo/1.14.6/script.sh b/scripts/cairo/1.14.6/script.sh index ac89c1b0e..bb68e4076 100755 --- a/scripts/cairo/1.14.6/script.sh +++ b/scripts/cairo/1.14.6/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/cairo.pc 
function mason_load_source { mason_download \ - http://cairographics.org/releases/${MASON_NAME}-${MASON_VERSION}.tar.xz \ + https://cairographics.org/releases/${MASON_NAME}-${MASON_VERSION}.tar.xz \ b19d7d7b4e290eb6377ddc3688984cb66da036cb mason_extract_tar_xz diff --git a/scripts/cairo/1.14.8/script.sh b/scripts/cairo/1.14.8/script.sh index 1845a6025..c56b0b5a3 100755 --- a/scripts/cairo/1.14.8/script.sh +++ b/scripts/cairo/1.14.8/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/cairo.pc function mason_load_source { mason_download \ - http://cairographics.org/releases/${MASON_NAME}-${MASON_VERSION}.tar.xz \ + https://cairographics.org/releases/${MASON_NAME}-${MASON_VERSION}.tar.xz \ b6a7b9d02e24fdd5fc5c44d30040f14d361a0950 mason_extract_tar_xz diff --git a/scripts/gzip/a4cfa6a638de351d26834cf2fea373693cdaa927/.travis.yml b/scripts/catch/2.12.1/.travis.yml similarity index 100% rename from scripts/gzip/a4cfa6a638de351d26834cf2fea373693cdaa927/.travis.yml rename to scripts/catch/2.12.1/.travis.yml diff --git a/scripts/catch/2.12.1/script.sh b/scripts/catch/2.12.1/script.sh new file mode 100644 index 000000000..07787d497 --- /dev/null +++ b/scripts/catch/2.12.1/script.sh @@ -0,0 +1,31 @@ +MASON_NAME=catch +MASON_VERSION=2.12.1 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/catchorg/Catch2/archive/v${MASON_VERSION}.tar.gz \ + feb00ce1fdecf00e8dd487b02c64da6d39108e4e + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/Catch2-${MASON_VERSION} +} + +# nothing to build, just copying single include header file +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r single_include/catch2/catch.hpp ${MASON_PREFIX}/include +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/gzip/bb80aac/.travis.yml b/scripts/catch/2.4.0/.travis.yml similarity index 100% rename from scripts/gzip/bb80aac/.travis.yml rename to scripts/catch/2.4.0/.travis.yml diff --git a/scripts/catch/2.4.0/script.sh b/scripts/catch/2.4.0/script.sh new file mode 100644 index 000000000..129ddc309 --- /dev/null +++ b/scripts/catch/2.4.0/script.sh @@ -0,0 +1,31 @@ +MASON_NAME=catch +MASON_VERSION=2.4.0 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/catchorg/Catch2/archive/v${MASON_VERSION}.tar.gz \ + de446b4b31efdcd6784cc97464050f2b1d91d43a + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/Catch2-${MASON_VERSION} +} + +# nothing to build, just copying single include header file +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r single_include/catch2/catch.hpp ${MASON_PREFIX}/include +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/ccache/3.7.2/.travis.yml b/scripts/ccache/3.7.2/.travis.yml new file mode 100644 index 000000000..781977716 --- /dev/null +++ b/scripts/ccache/3.7.2/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode9.2 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/ccache/3.7.2/script.sh b/scripts/ccache/3.7.2/script.sh new file mode 100755 index 000000000..83261d2d5 --- /dev/null +++ b/scripts/ccache/3.7.2/script.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +MASON_NAME=ccache +MASON_VERSION=3.7.1 +MASON_LIB_FILE=bin/ccache + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/${MASON_NAME}/${MASON_NAME}/releases/download/v${MASON_VERSION}/${MASON_NAME}-${MASON_VERSION}.tar.gz \ + 287db660ed7e45aeb824d69596711927a6a29221 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + # Add optimization flags since CFLAGS overrides the default (-g -O2) + export CFLAGS="${CFLAGS} -O3 -DNDEBUG" + ./configure \ + --prefix=${MASON_PREFIX} \ + ${MASON_HOST_ARG} \ + --with-bundled-zlib + make V=1 -j${MASON_CONCURRENCY} + make install +} + +function mason_ldflags { + : +} + +function mason_cflags { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/ccache/4.0/.travis.yml b/scripts/ccache/4.0/.travis.yml new file mode 100644 index 000000000..432d86d4f --- /dev/null +++ b/scripts/ccache/4.0/.travis.yml @@ -0,0 +1,18 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode12.2 + compiler: clang + - os: linux + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/ccache/4.0/script.sh b/scripts/ccache/4.0/script.sh new file mode 100755 index 000000000..e368fd05d --- /dev/null +++ b/scripts/ccache/4.0/script.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +MASON_NAME=ccache +MASON_VERSION=4.0 +MASON_LIB_FILE=bin/ccache + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/${MASON_NAME}/${MASON_NAME}/releases/download/v${MASON_VERSION}/${MASON_NAME}-${MASON_VERSION}.tar.gz \ + cda12d016e6e020e094cbde68e4e24712471841b + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_prepare_compile { + ${MASON_DIR}/mason install cmake 3.18.1 + ${MASON_DIR}/mason link cmake 3.18.1 +} + +function mason_compile { + rm -rf build + mkdir -p build + cd build + CMAKE_PREFIX_PATH=${MASON_ROOT}/.link \ + ${MASON_ROOT}/.link/bin/cmake \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} \ + -DCMAKE_BUILD_TYPE=Release \ + -DZSTD_FROM_INTERNET=ON \ + .. + make VERBOSE=1 -j${MASON_CONCURRENCY} + make install +} + +function mason_ldflags { + : +} + +function mason_cflags { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/clang++/10.0.0/.travis.yml b/scripts/clang++/10.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang++/10.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang++/10.0.0/script.sh b/scripts/clang++/10.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang++/10.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) 
+MASON_LIB_FILE=bin/${MASON_NAME} + +. ${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang++/11.0.0/.travis.yml b/scripts/clang++/11.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang++/11.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang++/11.0.0/script.sh b/scripts/clang++/11.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang++/11.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang++/6.0.0/script.sh b/scripts/clang++/6.0.0/script.sh index 391259e3c..c21f18d8a 100755 --- a/scripts/clang++/6.0.0/script.sh +++ b/scripts/clang++/6.0.0/script.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + # dynamically determine the path to this package HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" diff --git a/scripts/clang++/6.0.1/.travis.yml b/scripts/clang++/6.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang++/6.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang++/6.0.1/script.sh b/scripts/clang++/6.0.1/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang++/6.0.1/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang++/7.0.0/script.sh b/scripts/clang++/7.0.0/script.sh index 391259e3c..c21f18d8a 100755 --- a/scripts/clang++/7.0.0/script.sh +++ b/scripts/clang++/7.0.0/script.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + # dynamically determine the path to this package HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" diff --git a/scripts/clang++/7.0.1/.travis.yml b/scripts/clang++/7.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang++/7.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang++/7.0.1/script.sh b/scripts/clang++/7.0.1/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang++/7.0.1/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang++/8.0.0/.travis.yml b/scripts/clang++/8.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang++/8.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang++/8.0.0/script.sh b/scripts/clang++/8.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang++/8.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang++/9.0.0/.travis.yml b/scripts/clang++/9.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang++/9.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang++/9.0.0/script.sh b/scripts/clang++/9.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang++/9.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang++/9.0.1/.travis.yml b/scripts/clang++/9.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang++/9.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang++/9.0.1/script.sh b/scripts/clang++/9.0.1/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang++/9.0.1/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. ${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang++/base/common.sh b/scripts/clang++/base/common.sh index 054d4bcf7..fca163950 100755 --- a/scripts/clang++/base/common.sh +++ b/scripts/clang++/base/common.sh @@ -5,15 +5,17 @@ function mason_build { CLANG_PREFIX=$(${MASON_DIR}/mason prefix llvm ${MASON_VERSION}) MAJOR_MINOR=$(echo $MASON_VERSION | cut -d '.' -f1-2) + MAJOR=$(echo $MASON_VERSION | cut -d '.' 
-f1) # copy bin mkdir -p "${MASON_PREFIX}/bin" cp -a "${CLANG_PREFIX}/bin/${MASON_NAME}" "${MASON_PREFIX}/bin/" cp -a "${CLANG_PREFIX}/bin/${MASON_NAME}-${MAJOR_MINOR}" "${MASON_PREFIX}/bin/" cp -a "${CLANG_PREFIX}/bin/clang" "${MASON_PREFIX}/bin/" - cp -a "${CLANG_PREFIX}/bin/clang-${MAJOR_MINOR}" "${MASON_PREFIX}/bin/" + cp -a "${CLANG_PREFIX}/bin/clang-${MAJOR}" "${MASON_PREFIX}/bin/" cp -a "${CLANG_PREFIX}/bin/llvm-symbolizer" "${MASON_PREFIX}/bin/" cp -a "${CLANG_PREFIX}/bin/llvm-ar" "${MASON_PREFIX}/bin/" + cp -a "${CLANG_PREFIX}/bin/llvm-nm" "${MASON_PREFIX}/bin/" cp -a "${CLANG_PREFIX}/bin/llvm-ranlib" "${MASON_PREFIX}/bin/" cp -a "${CLANG_PREFIX}/bin/asan_symbolize" "${MASON_PREFIX}/bin/" cp -a "${CLANG_PREFIX}/bin/asan_symbolize.py" "${MASON_PREFIX}/bin/" diff --git a/scripts/clang-format/10.0.0/.travis.yml b/scripts/clang-format/10.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang-format/10.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang-format/10.0.0/script.sh b/scripts/clang-format/10.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang-format/10.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang-format/11.0.0/.travis.yml b/scripts/clang-format/11.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang-format/11.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang-format/11.0.0/script.sh b/scripts/clang-format/11.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang-format/11.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang-format/6.0.0/script.sh b/scripts/clang-format/6.0.0/script.sh index 391259e3c..c21f18d8a 100755 --- a/scripts/clang-format/6.0.0/script.sh +++ b/scripts/clang-format/6.0.0/script.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + # dynamically determine the path to this package HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" diff --git a/scripts/clang-format/6.0.1/.travis.yml b/scripts/clang-format/6.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang-format/6.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang-format/6.0.1/script.sh b/scripts/clang-format/6.0.1/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang-format/6.0.1/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang-format/7.0.0/script.sh b/scripts/clang-format/7.0.0/script.sh index 391259e3c..c21f18d8a 100755 --- a/scripts/clang-format/7.0.0/script.sh +++ b/scripts/clang-format/7.0.0/script.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + # dynamically determine the path to this package HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" diff --git a/scripts/clang-format/7.0.1/.travis.yml b/scripts/clang-format/7.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang-format/7.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang-format/7.0.1/script.sh b/scripts/clang-format/7.0.1/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang-format/7.0.1/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang-format/8.0.0/.travis.yml b/scripts/clang-format/8.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang-format/8.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang-format/8.0.0/script.sh b/scripts/clang-format/8.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang-format/8.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang-format/9.0.0/.travis.yml b/scripts/clang-format/9.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang-format/9.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang-format/9.0.0/script.sh b/scripts/clang-format/9.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang-format/9.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang-format/9.0.1/.travis.yml b/scripts/clang-format/9.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang-format/9.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang-format/9.0.1/script.sh b/scripts/clang-format/9.0.1/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang-format/9.0.1/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang-tidy/10.0.0/.travis.yml b/scripts/clang-tidy/10.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang-tidy/10.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang-tidy/10.0.0/README-yaml.md b/scripts/clang-tidy/10.0.0/README-yaml.md new file mode 100644 index 000000000..2cc738ab7 --- /dev/null +++ b/scripts/clang-tidy/10.0.0/README-yaml.md @@ -0,0 +1,13 @@ +This is a copy of `pyyaml-3.12` vendored on april 24, 2018 by @springmeyer. + +https://github.com/mapbox/mason/issues/563 documents why. + +The process to vendor was: + +``` +cd mason +pip install pyyaml --user +cp $(python -m site --user-site)/yaml scripts/clang-tidy/6.0.0/ +``` + +Then the `clang-tidy` package was built and the `yaml` directory was copied beside the `share/run-clang-tidy.py` script (which depends on it). \ No newline at end of file diff --git a/scripts/clang-tidy/10.0.0/script.sh b/scripts/clang-tidy/10.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang-tidy/10.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang-tidy/10.0.0/yaml/__init__.py b/scripts/clang-tidy/10.0.0/yaml/__init__.py new file mode 100644 index 000000000..87c15d38a --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/__init__.py @@ -0,0 +1,315 @@ + +from error import * + +from tokens import * +from events import * +from nodes import * + +from loader import * +from dumper import * + +__version__ = '3.12' + +try: + from cyaml import * + __with_libyaml__ = True +except ImportError: + __with_libyaml__ = False + +def scan(stream, Loader=Loader): + """ + Scan a YAML stream and produce scanning tokens. + """ + loader = Loader(stream) + try: + while loader.check_token(): + yield loader.get_token() + finally: + loader.dispose() + +def parse(stream, Loader=Loader): + """ + Parse a YAML stream and produce parsing events. + """ + loader = Loader(stream) + try: + while loader.check_event(): + yield loader.get_event() + finally: + loader.dispose() + +def compose(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + +def compose_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + loader = Loader(stream) + try: + while loader.check_node(): + yield loader.get_node() + finally: + loader.dispose() + +def load(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + loader = Loader(stream) + try: + return loader.get_single_data() + finally: + loader.dispose() + +def load_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. 
+ """ + loader = Loader(stream) + try: + while loader.check_data(): + yield loader.get_data() + finally: + loader.dispose() + +def safe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + return load(stream, SafeLoader) + +def safe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + return load_all(stream, SafeLoader) + +def emit(events, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + from StringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + try: + for event in events: + dumper.emit(event) + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize_all(nodes, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for node in nodes: + dumper.serialize(node) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + +def dump_all(documents, stream=None, Dumper=Dumper, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for data in documents: + dumper.represent(data) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def dump(data, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a Python object into a YAML stream. 
+ If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=Dumper, **kwds) + +def safe_dump_all(documents, stream=None, **kwds): + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + +def safe_dump(data, stream=None, **kwds): + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + +def add_implicit_resolver(tag, regexp, first=None, + Loader=Loader, Dumper=Dumper): + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + Loader.add_implicit_resolver(tag, regexp, first) + Dumper.add_implicit_resolver(tag, regexp, first) + +def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): + """ + Add a path based resolver for the given tag. + A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. + """ + Loader.add_path_resolver(tag, path, kind) + Dumper.add_path_resolver(tag, path, kind) + +def add_constructor(tag, constructor, Loader=Loader): + """ + Add a constructor for the given tag. + Constructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. + """ + Loader.add_constructor(tag, constructor) + +def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. 
+ Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. + """ + Loader.add_multi_constructor(tag_prefix, multi_constructor) + +def add_representer(data_type, representer, Dumper=Dumper): + """ + Add a representer for the given type. + Representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. + """ + Dumper.add_representer(data_type, representer) + +def add_multi_representer(data_type, multi_representer, Dumper=Dumper): + """ + Add a representer for the given type. + Multi-representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. + """ + Dumper.add_multi_representer(data_type, multi_representer) + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. + """ + def __init__(cls, name, bases, kwds): + super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + cls.yaml_dumper.add_representer(cls, cls.to_yaml) + +class YAMLObject(object): + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. + """ + + __metaclass__ = YAMLObjectMetaclass + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_loader = Loader + yaml_dumper = Dumper + + yaml_tag = None + yaml_flow_style = None + + def from_yaml(cls, loader, node): + """ + Convert a representation node to a Python object. + """ + return loader.construct_yaml_object(node, cls) + from_yaml = classmethod(from_yaml) + + def to_yaml(cls, dumper, data): + """ + Convert a Python object to a representation node. 
+ """ + return dumper.represent_yaml_object(cls.yaml_tag, data, cls, + flow_style=cls.yaml_flow_style) + to_yaml = classmethod(to_yaml) + diff --git a/scripts/clang-tidy/10.0.0/yaml/composer.py b/scripts/clang-tidy/10.0.0/yaml/composer.py new file mode 100644 index 000000000..06e5ac782 --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/composer.py @@ -0,0 +1,139 @@ + +__all__ = ['Composer', 'ComposerError'] + +from error import MarkedYAMLError +from events import * +from nodes import * + +class ComposerError(MarkedYAMLError): + pass + +class Composer(object): + + def __init__(self): + self.anchors = {} + + def check_node(self): + # Drop the STREAM-START event. + if self.check_event(StreamStartEvent): + self.get_event() + + # If there are more documents available? + return not self.check_event(StreamEndEvent) + + def get_node(self): + # Get the root node of the next document. + if not self.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # Drop the STREAM-START event. + self.get_event() + + # Compose a document if the stream is not empty. + document = None + if not self.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.check_event(StreamEndEvent): + event = self.get_event() + raise ComposerError("expected a single document in the stream", + document.start_mark, "but found another document", + event.start_mark) + + # Drop the STREAM-END event. + self.get_event() + + return document + + def compose_document(self): + # Drop the DOCUMENT-START event. + self.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. 
+ self.get_event() + + self.anchors = {} + return node + + def compose_node(self, parent, index): + if self.check_event(AliasEvent): + event = self.get_event() + anchor = event.anchor + if anchor not in self.anchors: + raise ComposerError(None, None, "found undefined alias %r" + % anchor.encode('utf-8'), event.start_mark) + return self.anchors[anchor] + event = self.peek_event() + anchor = event.anchor + if anchor is not None: + if anchor in self.anchors: + raise ComposerError("found duplicate anchor %r; first occurence" + % anchor.encode('utf-8'), self.anchors[anchor].start_mark, + "second occurence", event.start_mark) + self.descend_resolver(parent, index) + if self.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + event = self.get_event() + tag = event.tag + if tag is None or tag == u'!': + tag = self.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode(tag, event.value, + event.start_mark, event.end_mark, style=event.style) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + + def compose_mapping_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': 
+ tag = self.resolve(MappingNode, None, start_event.implicit) + node = MappingNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + while not self.check_event(MappingEndEvent): + #key_event = self.peek_event() + item_key = self.compose_node(node, None) + #if item_key in node.value: + # raise ComposerError("while composing a mapping", start_event.start_mark, + # "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + #node.value[item_key] = item_value + node.value.append((item_key, item_value)) + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + diff --git a/scripts/clang-tidy/10.0.0/yaml/constructor.py b/scripts/clang-tidy/10.0.0/yaml/constructor.py new file mode 100644 index 000000000..635faac3e --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/constructor.py @@ -0,0 +1,675 @@ + +__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', + 'ConstructorError'] + +from error import * +from nodes import * + +import datetime + +import binascii, re, sys, types + +class ConstructorError(MarkedYAMLError): + pass + +class BaseConstructor(object): + + yaml_constructors = {} + yaml_multi_constructors = {} + + def __init__(self): + self.constructed_objects = {} + self.recursive_objects = {} + self.state_generators = [] + self.deep_construct = False + + def check_data(self): + # If there are more documents available? + return self.check_node() + + def get_data(self): + # Construct and return the next document. + if self.check_node(): + return self.construct_document(self.get_node()) + + def get_single_data(self): + # Ensure that the stream contains a single document and construct it. 
+ node = self.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + data = self.construct_object(node) + while self.state_generators: + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + raise ConstructorError(None, None, + "found unconstructable recursive node", node.start_mark) + self.recursive_objects[node] = None + constructor = None + tag_suffix = None + if node.tag in self.yaml_constructors: + constructor = self.yaml_constructors[node.tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if node.tag.startswith(tag_prefix): + tag_suffix = node.tag[len(tag_prefix):] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = node.tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = generator.next() + if self.deep_construct: + for dummy in generator: + pass + else: + self.state_generators.append(generator) + self.constructed_objects[node] = data + del 
self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_scalar(self, node): + if not isinstance(node, ScalarNode): + raise ConstructorError(None, None, + "expected a scalar node, but found %s" % node.id, + node.start_mark) + return node.value + + def construct_sequence(self, node, deep=False): + if not isinstance(node, SequenceNode): + raise ConstructorError(None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + return [self.construct_object(child, deep=deep) + for child in node.value] + + def construct_mapping(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except TypeError, exc: + raise ConstructorError("while constructing a mapping", node.start_mark, + "found unacceptable key (%s)" % exc, key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + def construct_pairs(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + def add_constructor(cls, tag, constructor): + if not 'yaml_constructors' in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + add_constructor = classmethod(add_constructor) + + def add_multi_constructor(cls, tag_prefix, multi_constructor): + if not 'yaml_multi_constructors' in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + 
cls.yaml_multi_constructors[tag_prefix] = multi_constructor + add_multi_constructor = classmethod(add_multi_constructor) + +class SafeConstructor(BaseConstructor): + + def construct_scalar(self, node): + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + + def construct_yaml_null(self, node): + self.construct_scalar(node) + return None + + bool_values = { + u'yes': True, + u'no': False, + u'true': True, + u'false': False, + u'on': True, + u'off': False, + } + + def construct_yaml_bool(self, node): + value = 
self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '') + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '0': + return 0 + elif value.startswith('0b'): + return sign*int(value[2:], 2) + elif value.startswith('0x'): + return sign*int(value[2:], 16) + elif value[0] == '0': + return sign*int(value, 8) + elif ':' in value: + digits = [int(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*int(value) + + inf_value = 1e300 + while inf_value != inf_value*inf_value: + inf_value *= inf_value + nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). + + def construct_yaml_float(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '').lower() + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '.inf': + return sign*self.inf_value + elif value == '.nan': + return self.nan_value + elif ':' in value: + digits = [float(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*float(value) + + def construct_yaml_binary(self, node): + value = self.construct_scalar(node) + try: + return str(value).decode('base64') + except (binascii.Error, UnicodeEncodeError), exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + timestamp_regexp = re.compile( + ur'''^(?P[0-9][0-9][0-9][0-9]) + -(?P[0-9][0-9]?) + -(?P[0-9][0-9]?) + (?:(?:[Tt]|[ \t]+) + (?P[0-9][0-9]?) + :(?P[0-9][0-9]) + :(?P[0-9][0-9]) + (?:\.(?P[0-9]*))? + (?:[ \t]*(?PZ|(?P[-+])(?P[0-9][0-9]?) 
+ (?::(?P[0-9][0-9]))?))?)?$''', re.X) + + def construct_yaml_timestamp(self, node): + value = self.construct_scalar(node) + match = self.timestamp_regexp.match(node.value) + values = match.groupdict() + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + if not values['hour']: + return datetime.date(year, month, day) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction = values['fraction'][:6] + while len(fraction) < 6: + fraction += '0' + fraction = int(fraction) + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + tz_minute = int(values['tz_minute'] or 0) + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data + + def construct_yaml_omap(self, node): + # Note: we do not check for duplicate keys, because it's too + # CPU-expensive. + omap = [] + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + omap.append((key, value)) + + def construct_yaml_pairs(self, node): + # Note: the same code as `construct_yaml_omap`. 
+ pairs = [] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + data = set() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + value = self.construct_scalar(node) + try: + return value.encode('ascii') + except UnicodeEncodeError: + return value + + def construct_yaml_seq(self, node): + data = [] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + data = {} + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + raise ConstructorError(None, None, + "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'), + node.start_mark) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:null', + SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:bool', + SafeConstructor.construct_yaml_bool) + 
+SafeConstructor.add_constructor( + u'tag:yaml.org,2002:int', + SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:float', + SafeConstructor.construct_yaml_float) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:binary', + SafeConstructor.construct_yaml_binary) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:omap', + SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', + SafeConstructor.construct_yaml_pairs) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:set', + SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:str', + SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:seq', + SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:map', + SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, + SafeConstructor.construct_undefined) + +class Constructor(SafeConstructor): + + def construct_python_str(self, node): + return self.construct_scalar(node).encode('utf-8') + + def construct_python_unicode(self, node): + return self.construct_scalar(node) + + def construct_python_long(self, node): + return long(self.construct_yaml_int(node)) + + def construct_python_complex(self, node): + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + return tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python module", mark, + "expected non-empty name appended to the tag", mark) + try: + __import__(name) + except ImportError, exc: + raise ConstructorError("while constructing a Python module", mark, + "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark) + return 
sys.modules[name] + + def find_python_name(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python object", mark, + "expected non-empty name appended to the tag", mark) + if u'.' in name: + module_name, object_name = name.rsplit('.', 1) + else: + module_name = '__builtin__' + object_name = name + try: + __import__(module_name) + except ImportError, exc: + raise ConstructorError("while constructing a Python object", mark, + "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark) + module = sys.modules[module_name] + if not hasattr(module, object_name): + raise ConstructorError("while constructing a Python object", mark, + "cannot find %r in the module %r" % (object_name.encode('utf-8'), + module.__name__), mark) + return getattr(module, object_name) + + def construct_python_name(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python name", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python module", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_module(suffix, node.start_mark) + + class classobj: pass + + def make_python_instance(self, suffix, node, + args=None, kwds=None, newobj=False): + if not args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if newobj and isinstance(cls, type(self.classobj)) \ + and not args and not kwds: + instance = self.classobj() + instance.__class__ = cls + return instance + elif newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, 
instance, state): + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(object, key, value) + + def construct_python_object(self, suffix, node): + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. 
+ if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} + state = {} + listitems = [] + dictitems = {} + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if state: + self.set_python_instance_state(instance, state) + if listitems: + instance.extend(listitems) + if dictitems: + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + return self.construct_python_object_apply(suffix, node, newobj=True) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/none', + Constructor.construct_yaml_null) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/bool', + Constructor.construct_yaml_bool) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/str', + Constructor.construct_python_str) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/unicode', + Constructor.construct_python_unicode) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/int', + Constructor.construct_yaml_int) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/long', + Constructor.construct_python_long) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/float', + Constructor.construct_yaml_float) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/complex', + Constructor.construct_python_complex) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/list', + Constructor.construct_yaml_seq) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/tuple', + Constructor.construct_python_tuple) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/dict', + Constructor.construct_yaml_map) + +Constructor.add_multi_constructor( + 
u'tag:yaml.org,2002:python/name:', + Constructor.construct_python_name) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/module:', + Constructor.construct_python_module) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object:', + Constructor.construct_python_object) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/apply:', + Constructor.construct_python_object_apply) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/new:', + Constructor.construct_python_object_new) + diff --git a/scripts/clang-tidy/10.0.0/yaml/cyaml.py b/scripts/clang-tidy/10.0.0/yaml/cyaml.py new file mode 100644 index 000000000..68dcd7519 --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/cyaml.py @@ -0,0 +1,85 @@ + +__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', + 'CBaseDumper', 'CSafeDumper', 'CDumper'] + +from _yaml import CParser, CEmitter + +from constructor import * + +from serializer import * +from representer import * + +from resolver import * + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class CSafeLoader(CParser, SafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class CLoader(CParser, Constructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + Constructor.__init__(self) + Resolver.__init__(self) + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + 
allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CDumper(CEmitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/10.0.0/yaml/dumper.py b/scripts/clang-tidy/10.0.0/yaml/dumper.py new file mode 100644 index 000000000..f811d2c91 --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/dumper.py @@ -0,0 +1,62 @@ + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] + +from emitter import * +from serializer import * +from representer import * +from resolver 
import * + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class Dumper(Emitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + 
version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/10.0.0/yaml/emitter.py b/scripts/clang-tidy/10.0.0/yaml/emitter.py new file mode 100644 index 000000000..e5bcdcccb --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/emitter.py @@ -0,0 +1,1140 @@ + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +__all__ = ['Emitter', 'EmitterError'] + +from error import YAMLError +from events import * + +class EmitterError(YAMLError): + pass + +class ScalarAnalysis(object): + def __init__(self, scalar, empty, multiline, + allow_flow_plain, allow_block_plain, + allow_single_quoted, allow_double_quoted, + allow_block): + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + +class Emitter(object): + + DEFAULT_TAG_PREFIXES = { + u'!' : u'!', + u'tag:yaml.org,2002:' : u'!!', + } + + def __init__(self, stream, canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + + # The stream should have the methods `write` and possibly `flush`. + self.stream = stream + + # Encoding can be overriden by STREAM-START. + self.encoding = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] + self.state = self.expect_stream_start + + # Current event and the event queue. + self.events = [] + self.event = None + + # The current indentation level and the stack of previous indents. 
+ self.indents = [] + self.indent = None + + # Flow level. + self.flow_level = 0 + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + + # Whether the document requires an explicit document indicator + self.open_ended = False + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + self.best_indent = 2 + if indent and 1 < indent < 10: + self.best_indent = indent + self.best_width = 80 + if width and width > self.best_indent*2: + self.best_width = width + self.best_line_break = u'\n' + if line_break in [u'\r', u'\n', u'\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None + + # Prepared anchor and tag. + self.prepared_anchor = None + self.prepared_tag = None + + # Scalar analysis and style. + self.analysis = None + self.style = None + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. 
+ + def need_more_events(self): + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return (len(self.events) < count+1) + + def increase_indent(self, flow=False, indentless=False): + self.indents.append(self.indent) + if self.indent is None: + if flow: + self.indent = self.best_indent + else: + self.indent = 0 + elif not indentless: + self.indent += self.best_indent + + # States. + + # Stream handlers. + + def expect_stream_start(self): + if isinstance(self.event, StreamStartEvent): + if self.event.encoding and not getattr(self.stream, 'encoding', None): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError("expected StreamStartEvent, but got %s" + % self.event) + + def expect_nothing(self): + raise EmitterError("expected nothing, but got %s" % self.event) + + # Document handlers. 
+ + def expect_first_document_start(self): + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = self.event.tags.keys() + handles.sort() + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = (first and not self.event.explicit and not self.canonical + and not self.event.version and not self.event.tags + and not self.check_empty_document()) + if not implicit: + self.write_indent() + self.write_indicator(u'---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError("expected DocumentStartEvent, but got %s" + % self.event) + + def expect_document_end(self): + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator(u'...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError("expected DocumentEndEvent, but got %s" + % self.event) + + def expect_document_root(self): + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. 
+ + def expect_node(self, root=False, sequence=False, mapping=False, + simple_key=False): + self.root_context = root + self.sequence_context = sequence + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + self.process_anchor(u'&') + self.process_tag() + if isinstance(self.event, ScalarEvent): + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_sequence(): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_mapping(): + self.expect_flow_mapping() + else: + self.expect_block_mapping() + else: + raise EmitterError("expected NodeEvent, but got %s" % self.event) + + def expect_alias(self): + if self.event.anchor is None: + raise EmitterError("anchor is not specified for alias") + self.process_anchor(u'*') + self.state = self.states.pop() + + def expect_scalar(self): + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
+ + def expect_flow_sequence(self): + self.write_indicator(u'[', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. 
+ + def expect_flow_mapping(self): + self.write_indicator(u'{', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(u':', True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. 
+ + def expect_block_sequence(self): + indentless = (self.mapping_context and not self.indention) + self.increase_indent(flow=False, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + if not first and isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + self.write_indicator(u'-', True, indention=True) + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. + + def expect_block_mapping(self): + self.increase_indent(flow=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + if not first and isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + if self.check_simple_key(): + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + self.write_indent() + self.write_indicator(u':', True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. 
+ + def check_empty_sequence(self): + return (isinstance(self.event, SequenceStartEvent) and self.events + and isinstance(self.events[0], SequenceEndEvent)) + + def check_empty_mapping(self): + return (isinstance(self.event, MappingStartEvent) and self.events + and isinstance(self.events[0], MappingEndEvent)) + + def check_empty_document(self): + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return (isinstance(event, ScalarEvent) and event.anchor is None + and event.tag is None and event.implicit and event.value == u'') + + def check_simple_key(self): + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ + and self.event.tag is not None: + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return (length < 128 and (isinstance(self.event, AliasEvent) + or (isinstance(self.event, ScalarEvent) + and not self.analysis.empty and not self.analysis.multiline) + or self.check_empty_sequence() or self.check_empty_mapping())) + + # Anchor, Tag, and Scalar processors. 
+ + def process_anchor(self, indicator): + if self.event.anchor is None: + self.prepared_anchor = None + return + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator+self.prepared_anchor, True) + self.prepared_anchor = None + + def process_tag(self): + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ((not self.canonical or tag is None) and + ((self.style == '' and self.event.implicit[0]) + or (self.style != '' and self.event.implicit[1]))): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = u'!' + self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError("tag is not specified") + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + self.prepared_tag = None + + def choose_scalar_style(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if not self.event.style and self.event.implicit[0]: + if (not (self.simple_key_context and + (self.analysis.empty or self.analysis.multiline)) + and (self.flow_level and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain))): + return '' + if self.event.style and self.event.style in '|>': + if (not self.flow_level and not self.simple_key_context + and self.analysis.allow_block): + return self.event.style + if not self.event.style or self.event.style == '\'': + if (self.analysis.allow_single_quoted and + not (self.simple_key_context and self.analysis.multiline)): + return '\'' + return '"' + + def process_scalar(self): + if self.analysis is None: + self.analysis 
= self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = (not self.simple_key_context) + #if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == '\'': + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + + # Analyzers. + + def prepare_version(self, version): + major, minor = version + if major != 1: + raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) + return u'%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + if not handle: + raise EmitterError("tag handle must not be empty") + if handle[0] != u'!' 
or handle[-1] != u'!': + raise EmitterError("tag handle must start and end with '!': %r" + % (handle.encode('utf-8'))) + for ch in handle[1:-1]: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the tag handle: %r" + % (ch.encode('utf-8'), handle.encode('utf-8'))) + return handle + + def prepare_tag_prefix(self, prefix): + if not prefix: + raise EmitterError("tag prefix must not be empty") + chunks = [] + start = end = 0 + if prefix[0] == u'!': + end = 1 + while end < len(prefix): + ch = prefix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?!:@&=+$,_.~*\'()[]': + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(prefix[start:end]) + return u''.join(chunks) + + def prepare_tag(self, tag): + if not tag: + raise EmitterError("tag must not be empty") + if tag == u'!': + return tag + handle = None + suffix = tag + prefixes = self.tag_prefixes.keys() + prefixes.sort() + for prefix in prefixes: + if tag.startswith(prefix) \ + and (prefix == u'!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix):] + chunks = [] + start = end = 0 + while end < len(suffix): + ch = suffix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.~*\'()[]' \ + or (ch == u'!' 
and handle != u'!'): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = u''.join(chunks) + if handle: + return u'%s%s' % (handle, suffix_text) + else: + return u'!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + if not anchor: + raise EmitterError("anchor must not be empty") + for ch in anchor: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the anchor: %r" + % (ch.encode('utf-8'), anchor.encode('utf-8'))) + return anchor + + def analyze_scalar(self, scalar): + + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, + allow_flow_plain=False, allow_block_plain=True, + allow_single_quoted=True, allow_double_quoted=True, + allow_block=False) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith(u'---') or scalar.startswith(u'...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceeded_by_whitespace = True + + # Last character or followed by a whitespace. + followed_by_whitespace = (len(scalar) == 1 or + scalar[1] in u'\0 \t\r\n\x85\u2028\u2029') + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. 
+ if ch in u'#,[]{}&*!|>\'\"%@`': + flow_indicators = True + block_indicators = True + if ch in u'?:': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in u',?[]{}': + flow_indicators = True + if ch == u':': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'#' and preceeded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in u'\n\x85\u2028\u2029': + line_breaks = True + if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): + if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': + unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. + if ch == u' ': + if index == 0: + leading_space = True + if index == len(scalar)-1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in u'\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar)-1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029') + followed_by_whitespace = (index+1 >= len(scalar) or + scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029') + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. 
+ if (leading_space or leading_break + or trailing_space or trailing_break): + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if space_break or special_characters: + allow_flow_plain = allow_block_plain = \ + allow_single_quoted = allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis(scalar=scalar, + empty=False, multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block) + + # Writers. + + def flush_stream(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # Write BOM if needed. 
+ if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write(u'\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, + whitespace=False, indention=False): + if self.whitespace or not need_whitespace: + data = indicator + else: + data = u' '+indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + indent = self.indent or 0 + if not self.indention or self.column > indent \ + or (self.column == indent and not self.whitespace): + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = u' '*(indent-self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + data = u'%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + data = u'%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. 
+ + def write_single_quoted(self, text, split=True): + self.write_indicator(u'\'', True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != u' ': + if start+1 == end and self.column > self.best_width and split \ + and start != 0 and end != len(text): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'': + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == u'\'': + data = u'\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + self.write_indicator(u'\'', False) + + ESCAPE_REPLACEMENTS = { + u'\0': u'0', + u'\x07': u'a', + u'\x08': u'b', + u'\x09': u't', + u'\x0A': u'n', + u'\x0B': u'v', + u'\x0C': u'f', + u'\x0D': u'r', + u'\x1B': u'e', + u'\"': u'\"', + u'\\': u'\\', + u'\x85': u'N', + u'\xA0': u'_', + u'\u2028': u'L', + u'\u2029': u'P', + } + + def write_double_quoted(self, text, split=True): + self.write_indicator(u'"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \ + or not (u'\x20' <= ch <= u'\x7E' + or (self.allow_unicode + and (u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD'))): + if 
start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = u'\\'+self.ESCAPE_REPLACEMENTS[ch] + elif ch <= u'\xFF': + data = u'\\x%02X' % ord(ch) + elif ch <= u'\uFFFF': + data = u'\\u%04X' % ord(ch) + else: + data = u'\\U%08X' % ord(ch) + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end+1 + if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \ + and self.column+(end-start) > self.best_width and split: + data = text[start:end]+u'\\' + if start < end: + start = end + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == u' ': + data = u'\\' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator(u'"', False) + + def determine_block_hints(self, text): + hints = u'' + if text: + if text[0] in u' \n\x85\u2028\u2029': + hints += unicode(self.best_indent) + if text[-1] not in u'\n\x85\u2028\u2029': + hints += u'-' + elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029': + hints += u'+' + return hints + + def write_folded(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'>'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if not leading_space and ch is not None and ch != u' ' \ + and text[start] == u'\n': + self.write_line_break() + leading_space = (ch == u' ') + for br in text[start:end]: + if br == u'\n': + 
self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + spaces = (ch == u' ') + end += 1 + + def write_literal(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'|'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + else: + if ch is None or ch in u'\n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + + def write_plain(self, text, split=True): + if self.root_context: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = u' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = 
text[end] + if spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + diff --git a/scripts/clang-tidy/10.0.0/yaml/error.py b/scripts/clang-tidy/10.0.0/yaml/error.py new file mode 100644 index 000000000..577686db5 --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/error.py @@ -0,0 +1,75 @@ + +__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] + +class Mark(object): + + def __init__(self, name, index, line, column, buffer, pointer): + self.name = name + self.index = index + self.line = line + self.column = column + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + if self.buffer is None: + return None + head = '' + start = self.pointer + while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer-start > max_length/2-1: + head = ' ... ' + start += 5 + break + tail = '' + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': + end += 1 + if end-self.pointer > max_length/2-1: + tail = ' ... 
' + end -= 5 + break + snippet = self.buffer[start:end].encode('utf-8') + return ' '*indent + head + snippet + tail + '\n' \ + + ' '*(indent+self.pointer-start+len(head)) + '^' + + def __str__(self): + snippet = self.get_snippet() + where = " in \"%s\", line %d, column %d" \ + % (self.name, self.line+1, self.column+1) + if snippet is not None: + where += ":\n"+snippet + return where + +class YAMLError(Exception): + pass + +class MarkedYAMLError(YAMLError): + + def __init__(self, context=None, context_mark=None, + problem=None, problem_mark=None, note=None): + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + + def __str__(self): + lines = [] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None \ + and (self.problem is None or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None: + lines.append(self.note) + return '\n'.join(lines) + diff --git a/scripts/clang-tidy/10.0.0/yaml/events.py b/scripts/clang-tidy/10.0.0/yaml/events.py new file mode 100644 index 000000000..f79ad389c --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/events.py @@ -0,0 +1,86 @@ + +# Abstract classes. 
+ +class Event(object): + def __init__(self, start_mark=None, end_mark=None): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] + if hasattr(self, key)] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +class NodeEvent(Event): + def __init__(self, anchor, start_mark=None, end_mark=None): + self.anchor = anchor + self.start_mark = start_mark + self.end_mark = end_mark + +class CollectionStartEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, + flow_style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class CollectionEndEvent(Event): + pass + +# Implementations. + +class StreamStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndEvent(Event): + pass + +class DocumentStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None, version=None, tags=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + self.version = version + self.tags = tags + +class DocumentEndEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + +class AliasEvent(NodeEvent): + pass + +class ScalarEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, value, + start_mark=None, end_mark=None, style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class SequenceStartEvent(CollectionStartEvent): + pass 
+ +class SequenceEndEvent(CollectionEndEvent): + pass + +class MappingStartEvent(CollectionStartEvent): + pass + +class MappingEndEvent(CollectionEndEvent): + pass + diff --git a/scripts/clang-tidy/10.0.0/yaml/loader.py b/scripts/clang-tidy/10.0.0/yaml/loader.py new file mode 100644 index 000000000..293ff467b --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/loader.py @@ -0,0 +1,40 @@ + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] + +from reader import * +from scanner import * +from parser import * +from composer import * +from constructor import * +from resolver import * + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/10.0.0/yaml/nodes.py b/scripts/clang-tidy/10.0.0/yaml/nodes.py new file mode 100644 index 000000000..c4f070c41 --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/nodes.py @@ -0,0 +1,49 @@ + +class Node(object): + def __init__(self, tag, value, start_mark, end_mark): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + value = self.value + #if isinstance(value, list): + # if len(value) == 0: + # value = '' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # 
value = '<%d items>' % len(value) + #else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... ') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + +class ScalarNode(Node): + id = 'scalar' + def __init__(self, tag, value, + start_mark=None, end_mark=None, style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class CollectionNode(Node): + def __init__(self, tag, value, + start_mark=None, end_mark=None, flow_style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class SequenceNode(CollectionNode): + id = 'sequence' + +class MappingNode(CollectionNode): + id = 'mapping' + diff --git a/scripts/clang-tidy/10.0.0/yaml/parser.py b/scripts/clang-tidy/10.0.0/yaml/parser.py new file mode 100644 index 000000000..f9e3057f3 --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/parser.py @@ -0,0 +1,589 @@ + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? 
+# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# +# FIRST sets: +# +# stream: { STREAM-START } +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR 
TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } + +__all__ = ['Parser', 'ParserError'] + +from error import MarkedYAMLError +from tokens import * +from events import * +from scanner import * + +class ParserError(MarkedYAMLError): + pass + +class Parser(object): + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + + DEFAULT_TAGS = { + u'!': u'!', + u'!!': u'tag:yaml.org,2002:', + } + + def __init__(self): + self.current_event = None + self.yaml_version = None + self.tag_handles = {} + self.states = [] + self.marks = [] + self.state = self.parse_stream_start + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def check_event(self, *choices): + # Check the type of the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # Get the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + return self.current_event + + def get_event(self): + # Get the next event and proceed further. + if self.current_event is None: + if self.state: + self.current_event = self.state() + value = self.current_event + self.current_event = None + return value + + # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END + # implicit_document ::= block_node DOCUMENT-END* + # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* + + def parse_stream_start(self): + + # Parse the stream start. + token = self.get_token() + event = StreamStartEvent(token.start_mark, token.end_mark, + encoding=token.encoding) + + # Prepare the next state. 
+ self.state = self.parse_implicit_document_start + + return event + + def parse_implicit_document_start(self): + + # Parse an implicit document. + if not self.check_token(DirectiveToken, DocumentStartToken, + StreamEndToken): + self.tag_handles = self.DEFAULT_TAGS + token = self.peek_token() + start_mark = end_mark = token.start_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=False) + + # Prepare the next state. + self.states.append(self.parse_document_end) + self.state = self.parse_block_node + + return event + + else: + return self.parse_document_start() + + def parse_document_start(self): + + # Parse any extra document end indicators. + while self.check_token(DocumentEndToken): + self.get_token() + + # Parse an explicit document. + if not self.check_token(StreamEndToken): + token = self.peek_token() + start_mark = token.start_mark + version, tags = self.process_directives() + if not self.check_token(DocumentStartToken): + raise ParserError(None, None, + "expected '', but found %r" + % self.peek_token().id, + self.peek_token().start_mark) + token = self.get_token() + end_mark = token.end_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=True, version=version, tags=tags) + self.states.append(self.parse_document_end) + self.state = self.parse_document_content + else: + # Parse the end of the stream. + token = self.get_token() + event = StreamEndEvent(token.start_mark, token.end_mark) + assert not self.states + assert not self.marks + self.state = None + return event + + def parse_document_end(self): + + # Parse the document end. + token = self.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.check_token(DocumentEndToken): + token = self.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, + explicit=explicit) + + # Prepare the next state. 
+ self.state = self.parse_document_start + + return event + + def parse_document_content(self): + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + event = self.process_empty_scalar(self.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == u'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == u'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle.encode('utf-8'), + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? 
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + if self.check_token(AliasToken): + token = self.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) + self.state = self.states.pop() + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle.encode('utf-8'), + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == u'!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.peek_token().start_mark + event = None + implicit = (tag is None or tag == u'!') + if indentless_sequence and 
self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + self.state = self.parse_indentless_sequence_entry + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == u'!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + self.state = self.states.pop() + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_sequence_first_entry + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_mapping_first_key + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_sequence_first_entry + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_mapping_first_key + elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. 
+ event = ScalarEvent(anchor, tag, (implicit, False), u'', + start_mark, end_mark) + self.state = self.states.pop() + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.peek_token() + raise ParserError("while parsing a %s node" % node, start_mark, + "expected the node content, but found %r" % token.id, + token.start_mark) + return event + + # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END + + def parse_block_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_sequence_entry() + + def parse_block_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, BlockEndToken): + self.states.append(self.parse_block_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_block_sequence_entry + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block collection", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ + + def parse_indentless_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, + KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_indentless_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_indentless_sequence_entry + return self.process_empty_scalar(token.end_mark) + token = self.peek_token() + event = SequenceEndEvent(token.start_mark, token.start_mark) + self.state = self.states.pop() + return event + + # block_mapping ::= BLOCK-MAPPING_START + # ((KEY 
block_node_or_indentless_sequence?)? + # (VALUE block_node_or_indentless_sequence?)?)* + # BLOCK-END + + def parse_block_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_mapping_key() + + def parse_block_mapping_key(self): + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_value) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block mapping", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_block_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_key) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_block_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + # flow_sequence ::= FLOW-SEQUENCE-START + # (flow_sequence_entry FLOW-ENTRY)* + # flow_sequence_entry? + # FLOW-SEQUENCE-END + # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + # + # Note that while production rules for both flow_sequence_entry and + # flow_mapping_entry are equal, their interpretations are different. + # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` + # generate an inline mapping (set syntax). 
+ + def parse_flow_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + if not self.check_token(FlowSequenceEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow sequence", self.marks[-1], + "expected ',' or ']', but got %r" % token.id, token.start_mark) + + if self.check_token(KeyToken): + token = self.peek_token() + event = MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_sequence_entry_mapping_end(self): + self.state = self.parse_flow_sequence_entry + token = self.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + + def parse_flow_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_mapping_key(first=True) + + def parse_flow_mapping_key(self, first=False): + if not self.check_token(FlowMappingEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow mapping", self.marks[-1], + "expected ',' or '}', but got %r" % token.id, token.start_mark) + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(token.end_mark) + elif not self.check_token(FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_empty_value) + return self.parse_flow_node() + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_key) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_mapping_empty_value(self): + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(self.peek_token().start_mark) + + def process_empty_scalar(self, mark): + return ScalarEvent(None, None, (True, False), u'', mark, mark) + diff --git a/scripts/clang-tidy/10.0.0/yaml/reader.py b/scripts/clang-tidy/10.0.0/yaml/reader.py new file mode 100644 index 000000000..3249e6b9f --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/reader.py @@ -0,0 +1,190 @@ +# This module contains abstractions for the input stream. You don't have to +# looks further, there are no pretty code. +# +# We define two classes here. +# +# Mark(source, line, column) +# It's just a record and its only use is producing nice error messages. +# Parser does not use it for any other purposes. +# +# Reader(source, data) +# Reader determines the encoding of `data` and converts it to unicode. +# Reader provides the following methods and attributes: +# reader.peek(length=1) - return the next `length` characters +# reader.forward(length=1) - move the current position to `length` characters. +# reader.index - the number of the current character. +# reader.line, stream.column - the line and the column of the current character. 
+ +__all__ = ['Reader', 'ReaderError'] + +from error import YAMLError, Mark + +import codecs, re + +class ReaderError(YAMLError): + + def __init__(self, name, position, character, encoding, reason): + self.name = name + self.character = character + self.position = position + self.encoding = encoding + self.reason = reason + + def __str__(self): + if isinstance(self.character, str): + return "'%s' codec can't decode byte #x%02x: %s\n" \ + " in \"%s\", position %d" \ + % (self.encoding, ord(self.character), self.reason, + self.name, self.position) + else: + return "unacceptable character #x%04x: %s\n" \ + " in \"%s\", position %d" \ + % (self.character, self.reason, + self.name, self.position) + +class Reader(object): + # Reader: + # - determines the data encoding and converts it to unicode, + # - checks if characters are in allowed range, + # - adds '\0' to the end. + + # Reader accepts + # - a `str` object, + # - a `unicode` object, + # - a file-like object with its `read` method returning `str`, + # - a file-like object with its `read` method returning `unicode`. + + # Yeah, it's ugly and slow. 
+ + def __init__(self, stream): + self.name = None + self.stream = None + self.stream_pointer = 0 + self.eof = True + self.buffer = u'' + self.pointer = 0 + self.raw_buffer = None + self.raw_decode = None + self.encoding = None + self.index = 0 + self.line = 0 + self.column = 0 + if isinstance(stream, unicode): + self.name = "" + self.check_printable(stream) + self.buffer = stream+u'\0' + elif isinstance(stream, str): + self.name = "" + self.raw_buffer = stream + self.determine_encoding() + else: + self.stream = stream + self.name = getattr(stream, 'name', "") + self.eof = False + self.raw_buffer = '' + self.determine_encoding() + + def peek(self, index=0): + try: + return self.buffer[self.pointer+index] + except IndexError: + self.update(index+1) + return self.buffer[self.pointer+index] + + def prefix(self, length=1): + if self.pointer+length >= len(self.buffer): + self.update(length) + return self.buffer[self.pointer:self.pointer+length] + + def forward(self, length=1): + if self.pointer+length+1 >= len(self.buffer): + self.update(length+1) + while length: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch in u'\n\x85\u2028\u2029' \ + or (ch == u'\r' and self.buffer[self.pointer] != u'\n'): + self.line += 1 + self.column = 0 + elif ch != u'\uFEFF': + self.column += 1 + length -= 1 + + def get_mark(self): + if self.stream is None: + return Mark(self.name, self.index, self.line, self.column, + self.buffer, self.pointer) + else: + return Mark(self.name, self.index, self.line, self.column, + None, None) + + def determine_encoding(self): + while not self.eof and len(self.raw_buffer) < 2: + self.update_raw() + if not isinstance(self.raw_buffer, unicode): + if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): + self.raw_decode = codecs.utf_16_le_decode + self.encoding = 'utf-16-le' + elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): + self.raw_decode = codecs.utf_16_be_decode + self.encoding = 'utf-16-be' + else: + self.raw_decode = 
codecs.utf_8_decode + self.encoding = 'utf-8' + self.update(1) + + NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') + def check_printable(self, data): + match = self.NON_PRINTABLE.search(data) + if match: + character = match.group() + position = self.index+(len(self.buffer)-self.pointer)+match.start() + raise ReaderError(self.name, position, ord(character), + 'unicode', "special characters are not allowed") + + def update(self, length): + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer:] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, + 'strict', self.eof) + except UnicodeDecodeError, exc: + character = exc.object[exc.start] + if self.stream is not None: + position = self.stream_pointer-len(self.raw_buffer)+exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, + exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += u'\0' + self.raw_buffer = None + break + + def update_raw(self, size=1024): + data = self.stream.read(size) + if data: + self.raw_buffer += data + self.stream_pointer += len(data) + else: + self.eof = True + +#try: +# import psyco +# psyco.bind(Reader) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/10.0.0/yaml/representer.py b/scripts/clang-tidy/10.0.0/yaml/representer.py new file mode 100644 index 000000000..4ea8cb1fe --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/representer.py @@ -0,0 +1,486 @@ + +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError'] + +from error import * +from nodes import * + +import datetime + +import sys, copy_reg, types + +class RepresenterError(YAMLError): + pass + +class 
BaseRepresenter(object): + + yaml_representers = {} + yaml_multi_representers = {} + + def __init__(self, default_style=None, default_flow_style=None): + self.default_style = default_style + self.default_flow_style = default_flow_style + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent(self, data): + node = self.represent_data(data) + self.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def get_classobj_bases(self, cls): + bases = [cls] + for base in cls.__bases__: + bases.extend(self.get_classobj_bases(base)) + return bases + + def represent_data(self, data): + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + #if node is None: + # raise RepresenterError("recursive objects are not allowed: %r" % data) + return node + #self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if type(data) is types.InstanceType: + data_types = self.get_classobj_bases(data.__class__)+list(data_types) + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, unicode(data)) + #if alias_key is not None: + # self.represented_objects[alias_key] = node + return node + + def add_representer(cls, data_type, representer): + if not 'yaml_representers' in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + 
cls.yaml_representers[data_type] = representer + add_representer = classmethod(add_representer) + + def add_multi_representer(cls, data_type, representer): + if not 'yaml_multi_representers' in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + add_multi_representer = classmethod(add_multi_representer) + + def represent_scalar(self, tag, value, style=None): + if style is None: + style = self.default_style + node = ScalarNode(tag, value, style=style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + value = [] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + value = [] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = mapping.items() + mapping.sort() + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + 
else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + return False + +class SafeRepresenter(BaseRepresenter): + + def ignore_aliases(self, data): + if data is None: + return True + if isinstance(data, tuple) and data == (): + return True + if isinstance(data, (str, unicode, bool, int, float)): + return True + + def represent_none(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:null', + u'null') + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_bool(self, data): + if data: + value = u'true' + else: + value = u'false' + return self.represent_scalar(u'tag:yaml.org,2002:bool', value) + + def represent_int(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + def represent_long(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value*inf_value): + inf_value *= inf_value + + def represent_float(self, data): + if data != data or (data == 0.0 and data == 1.0): + value = u'.nan' + elif data == self.inf_value: + value = u'.inf' + elif data == -self.inf_value: + value = u'-.inf' + else: + value = unicode(repr(data)).lower() + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag. We fix this by adding + # '.0' before the 'e' symbol. + if u'.' 
not in value and u'e' in value: + value = value.replace(u'e', u'.0e', 1) + return self.represent_scalar(u'tag:yaml.org,2002:float', value) + + def represent_list(self, data): + #pairs = (len(data) > 0 and isinstance(data, list)) + #if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + #if not pairs: + return self.represent_sequence(u'tag:yaml.org,2002:seq', data) + #value = [] + #for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + #return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + return self.represent_mapping(u'tag:yaml.org,2002:map', data) + + def represent_set(self, data): + value = {} + for key in data: + value[key] = None + return self.represent_mapping(u'tag:yaml.org,2002:set', value) + + def represent_date(self, data): + value = unicode(data.isoformat()) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + value = unicode(data.isoformat(' ')) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + raise RepresenterError("cannot represent an object: %s" % data) + +SafeRepresenter.add_representer(type(None), + SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, + SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(unicode, + SafeRepresenter.represent_unicode) + +SafeRepresenter.add_representer(bool, + SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, + SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(long, + SafeRepresenter.represent_long) + 
+SafeRepresenter.add_representer(float, + SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, + SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, + SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, + SafeRepresenter.represent_undefined) + +class Representer(SafeRepresenter): + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:python/str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + tag = None + try: + data.encode('ascii') + tag = u'tag:yaml.org,2002:python/unicode' + except UnicodeEncodeError: + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data) + + def represent_long(self, data): + tag = u'tag:yaml.org,2002:int' + if int(data) is not data: + tag = u'tag:yaml.org,2002:python/long' + return self.represent_scalar(tag, unicode(data)) + + def represent_complex(self, data): + if data.imag == 0.0: + data = u'%r' % data.real + elif data.real == 0.0: + data = u'%rj' % data.imag + elif data.imag > 0: + data = u'%r+%rj' % (data.real, data.imag) + else: + data = u'%r%rj' % (data.real, data.imag) + return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) + + def represent_tuple(self, data): + return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + name = u'%s.%s' % 
(data.__module__, data.__name__) + return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'') + + def represent_module(self, data): + return self.represent_scalar( + u'tag:yaml.org,2002:python/module:'+data.__name__, u'') + + def represent_instance(self, data): + # For instances of classic classes, we use __getinitargs__ and + # __getstate__ to serialize the data. + + # If data.__getinitargs__ exists, the object must be reconstructed by + # calling cls(**args), where args is a tuple returned by + # __getinitargs__. Otherwise, the cls.__init__ method should never be + # called and the class instance is created by instantiating a trivial + # class and assigning to the instance's __class__ variable. + + # If data.__getstate__ exists, it returns the state of the object. + # Otherwise, the state of the object is data.__dict__. + + # We produce either a !!python/object or !!python/object/new node. + # If data.__getinitargs__ does not exist and state is a dictionary, we + # produce a !!python/object node . Otherwise we produce a + # !!python/object/new node. + + cls = data.__class__ + class_name = u'%s.%s' % (cls.__module__, cls.__name__) + args = None + state = None + if hasattr(data, '__getinitargs__'): + args = list(data.__getinitargs__()) + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__ + if args is None and isinstance(state, dict): + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+class_name, state) + if isinstance(state, dict) and not state: + return self.represent_sequence( + u'tag:yaml.org,2002:python/object/new:'+class_name, args) + value = {} + if args: + value['args'] = args + value['state'] = state + return self.represent_mapping( + u'tag:yaml.org,2002:python/object/new:'+class_name, value) + + def represent_object(self, data): + # We use __reduce__ API to save the data. 
data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. + + cls = type(data) + if cls in copy_reg.dispatch_table: + reduce = copy_reg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError("cannot represent object: %r" % data) + reduce = (list(reduce)+[None]*5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = u'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = u'tag:yaml.org,2002:python/object/apply:' + newobj = False + function_name = u'%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems \ + and isinstance(state, dict) and newobj: + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+function_name, state) + if not listitems and not dictitems \ + and isinstance(state, dict) and not state: + return self.represent_sequence(tag+function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag+function_name, value) 
+ +Representer.add_representer(str, + Representer.represent_str) + +Representer.add_representer(unicode, + Representer.represent_unicode) + +Representer.add_representer(long, + Representer.represent_long) + +Representer.add_representer(complex, + Representer.represent_complex) + +Representer.add_representer(tuple, + Representer.represent_tuple) + +Representer.add_representer(type, + Representer.represent_name) + +Representer.add_representer(types.ClassType, + Representer.represent_name) + +Representer.add_representer(types.FunctionType, + Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, + Representer.represent_name) + +Representer.add_representer(types.ModuleType, + Representer.represent_module) + +Representer.add_multi_representer(types.InstanceType, + Representer.represent_instance) + +Representer.add_multi_representer(object, + Representer.represent_object) + diff --git a/scripts/clang-tidy/10.0.0/yaml/resolver.py b/scripts/clang-tidy/10.0.0/yaml/resolver.py new file mode 100644 index 000000000..528fbc0ea --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/resolver.py @@ -0,0 +1,227 @@ + +__all__ = ['BaseResolver', 'Resolver'] + +from error import * +from nodes import * + +import re + +class ResolverError(YAMLError): + pass + +class BaseResolver(object): + + DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} + yaml_path_resolvers = {} + + def __init__(self): + self.resolver_exact_paths = [] + self.resolver_prefix_paths = [] + + def add_implicit_resolver(cls, tag, regexp, first): + if not 'yaml_implicit_resolvers' in cls.__dict__: + implicit_resolvers = {} + for key in cls.yaml_implicit_resolvers: + implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:] + cls.yaml_implicit_resolvers = implicit_resolvers + if first is None: + first = [None] + for ch in first: + 
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + add_implicit_resolver = classmethod(add_implicit_resolver) + + def add_path_resolver(cls, tag, path, kind=None): + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. 
+ if not 'yaml_path_resolvers' in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError("Invalid path element: %s" % element) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ + and not isinstance(node_check, basestring) \ + and node_check is not None: + raise ResolverError("Invalid node checker: %s" % node_check) + if not isinstance(index_check, (basestring, int)) \ + and index_check is not None: + raise ResolverError("Invalid index checker: %s" % index_check) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] \ + and kind is not None: + raise ResolverError("Invalid node kind: %s" % kind) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + add_path_resolver = classmethod(add_path_resolver) + + def descend_resolver(self, current_node, current_index): + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, + current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + 
prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, + current_node, current_index): + node_check, index_check = path[depth-1] + if isinstance(node_check, basestring): + if current_node.tag != node_check: + return + elif node_check is not None: + if not isinstance(current_node, node_check): + return + if index_check is True and current_index is not None: + return + if (index_check is False or index_check is None) \ + and current_index is None: + return + if isinstance(index_check, basestring): + if not (isinstance(current_index, ScalarNode) + and index_check == current_index.value): + return + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return + return True + + def resolve(self, kind, value, implicit): + if kind is ScalarNode and implicit[0]: + if value == u'': + resolvers = self.yaml_implicit_resolvers.get(u'', []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if self.yaml_path_resolvers: + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + +class Resolver(BaseResolver): + pass + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:bool', + re.compile(ur'''^(?:yes|Yes|YES|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + 
list(u'yYnNtTfFoO')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:float', + re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? + |\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:int', + re.compile(ur'''^(?:[-+]?0b[0-1_]+ + |[-+]?0[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), + list(u'-+0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:merge', + re.compile(ur'^(?:<<)$'), + [u'<']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:null', + re.compile(ur'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + [u'~', u'n', u'N', u'']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:timestamp', + re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? + (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list(u'0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:value', + re.compile(ur'^(?:=)$'), + [u'=']) + +# The following resolver is only for documentation purposes. It cannot work +# because plain scalars cannot start with '!', '&', or '*'. 
+Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:yaml', + re.compile(ur'^(?:!|&|\*)$'), + list(u'!&*')) + diff --git a/scripts/clang-tidy/10.0.0/yaml/scanner.py b/scripts/clang-tidy/10.0.0/yaml/scanner.py new file mode 100644 index 000000000..834f662a4 --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/scanner.py @@ -0,0 +1,1453 @@ + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# Read comments in the Scanner code for more details. +# + +__all__ = ['Scanner', 'ScannerError'] + +from error import MarkedYAMLError +from tokens import * + +class ScannerError(MarkedYAMLError): + pass + +class SimpleKey(object): + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + +class Scanner(object): + + def __init__(self): + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer. + + # Had we reached the end of the stream? + self.done = False + + # The number of unclosed '{' and '['. `flow_level == 0` means block + # context. + self.flow_level = 0 + + # List of processed tokens that are not yet emitted. 
+ self.tokens = [] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. + self.possible_simple_keys = {} + + # Public methods. + + def check_token(self, *choices): + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # Return the next token, but do not delete if from the queue. 
+ while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + return self.tokens[0] + + def get_token(self): + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. + + def need_more_tokens(self): + if self.done: + return False + if not self.tokens: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + + def fetch_more_tokens(self): + + # Eat whitespaces and comments until we reach the next token. + self.scan_to_next_token() + + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.column) + + # Peek the next character. + ch = self.peek() + + # Is it the end of stream? + if ch == u'\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == u'%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == u'-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == u'.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + #if ch == u'\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == u'[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == u'{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == u']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? 
+ if ch == u'}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == u',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == u'-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == u'?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == u':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == u'*': + return self.fetch_alias() + + # Is it an anchor? + if ch == u'&': + return self.fetch_anchor() + + # Is it a tag? + if ch == u'!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == u'|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == u'>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == u'\'': + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == u'\"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError("while scanning for the next token", None, + "found character %r that cannot start any token" + % ch.encode('utf-8'), self.get_mark()) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. 
We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). + for level in self.possible_simple_keys.keys(): + key = self.possible_simple_keys[level] + if key.line != self.line \ + or self.index-key.index > 1024: + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # The next token may start a simple key. We check if it's possible + # and save its position. This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.column + + # The next token might be a simple key. Let's save it's number and + # position. + if self.allow_simple_key: + self.remove_possible_simple_key() + token_number = self.tokens_taken+len(self.tokens) + key = SimpleKey(token_number, required, + self.index, self.line, self.column, self.get_mark()) + self.possible_simple_keys[self.flow_level] = key + + def remove_possible_simple_key(self): + # Remove the saved possible key position at the current flow level. 
+ if self.flow_level in self.possible_simple_keys: + key = self.possible_simple_keys[self.flow_level] + + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + + del self.possible_simple_keys[self.flow_level] + + # Indentation functions. + + def unwind_indent(self, column): + + ## In flow context, tokens should respect indentation. + ## Actually the condition should be `self.indent >= column` according to + ## the spec. But this condition will prohibit intuitively correct + ## constructions such as + ## key : { + ## } + #if self.flow_level and self.indent > column: + # raise ScannerError(None, None, + # "invalid intendation or unclosed '[' or '{'", + # self.get_mark()) + + # In the flow context, indentation is ignored. We make the scanner less + # restrictive then specification requires. + if self.flow_level: + return + + # In block context, we may need to issue the BLOCK-END tokens. + while self.indent > column: + mark = self.get_mark() + self.indent = self.indents.pop() + self.tokens.append(BlockEndToken(mark, mark)) + + def add_indent(self, column): + # Check if we need to increase indentation. + if self.indent < column: + self.indents.append(self.indent) + self.indent = column + return True + return False + + # Fetchers. + + def fetch_stream_start(self): + # We always add STREAM-START as the first token and STREAM-END as the + # last token. + + # Read the token. + mark = self.get_mark() + + # Add STREAM-START. + self.tokens.append(StreamStartToken(mark, mark, + encoding=self.encoding)) + + + def fetch_stream_end(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + self.possible_simple_keys = {} + + # Read the token. + mark = self.get_mark() + + # Add STREAM-END. + self.tokens.append(StreamEndToken(mark, mark)) + + # The steam is finished. 
+ self.done = True + + def fetch_directive(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Scan and add DIRECTIVE. + self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END. + start_mark = self.get_mark() + self.forward(3) + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_start(self): + self.fetch_flow_collection_start(FlowSequenceStartToken) + + def fetch_flow_mapping_start(self): + self.fetch_flow_collection_start(FlowMappingStartToken) + + def fetch_flow_collection_start(self, TokenClass): + + # '[' and '{' may start a simple key. + self.save_possible_simple_key() + + # Increase the flow level. + self.flow_level += 1 + + # Simple keys are allowed after '[' and '{'. + self.allow_simple_key = True + + # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_end(self): + self.fetch_flow_collection_end(FlowSequenceEndToken) + + def fetch_flow_mapping_end(self): + self.fetch_flow_collection_end(FlowMappingEndToken) + + def fetch_flow_collection_end(self, TokenClass): + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Decrease the flow level. + self.flow_level -= 1 + + # No simple keys after ']' or '}'. 
+ self.allow_simple_key = False + + # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_entry(self): + + # Simple keys are allowed after ','. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add FLOW-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError(None, None, + "sequence entries are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-SEQUENCE-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + + # Simple keys are allowed after '-'. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not nessesary a simple)? + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping keys are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-MAPPING-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. 
+ self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert(key.token_number-self.tokens_taken, + KeyToken(key.mark, key.mark)) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert(key.token_number-self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark)) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be catched by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping values are not allowed here", + self.get_mark()) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. 
+ start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + + # ALIAS could be a simple key. + self.save_possible_simple_key() + + # No simple keys after ALIAS. + self.allow_simple_key = False + + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + + # ANCHOR could start a simple key. + self.save_possible_simple_key() + + # No simple keys after ANCHOR. + self.allow_simple_key = False + + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + + # TAG could start a simple key. + self.save_possible_simple_key() + + # No simple keys after TAG. + self.allow_simple_key = False + + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + + # A simple key may follow a block scalar. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + self.fetch_flow_scalar(style='\'') + + def fetch_double(self): + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + + # A flow scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after flow scalars. + self.allow_simple_key = False + + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + + # A plain scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + + # Scan and add SCALAR. 
May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.column == 0: + return True + + def check_document_start(self): + + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'---' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_document_end(self): + + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'...' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_block_entry(self): + + # BLOCK-ENTRY: '-' (' '|'\n') + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_key(self): + + # KEY(flow context): '?' + if self.flow_level: + return True + + # KEY(block context): '?' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_value(self): + + # VALUE(flow context): ':' + if self.flow_level: + return True + + # VALUE(block context): ':' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_plain(self): + + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. + # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + ch = self.peek() + return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ + or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029' + and (ch == u'-' or (not self.flow_level and ch in u'?:'))) + + # Scanners. + + def scan_to_next_token(self): + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. 
+ # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + + if self.index == 0 and self.peek() == u'\uFEFF': + self.forward() + found = False + while not found: + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + + def scan_directive(self): + # See the specification for details. + start_mark = self.get_mark() + self.forward() + name = self.scan_directive_name(start_mark) + value = None + if name == u'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.get_mark() + elif name == u'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.get_mark() + else: + end_mark = self.get_mark() + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # See the specification for details. 
+ length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return value + + def scan_yaml_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + major = self.scan_yaml_directive_number(start_mark) + if self.peek() != '.': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or '.', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + minor = self.scan_yaml_directive_number(start_mark) + if self.peek() not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or ' ', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + return (major, minor) + + def scan_yaml_directive_number(self, start_mark): + # See the specification for details. + ch = self.peek() + if not (u'0' <= ch <= u'9'): + raise ScannerError("while scanning a directive", start_mark, + "expected a digit, but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 0 + while u'0' <= self.peek(length) <= u'9': + length += 1 + value = int(self.prefix(length)) + self.forward(length) + return value + + def scan_tag_directive_value(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + handle = self.scan_tag_directive_handle(start_mark) + while self.peek() == u' ': + self.forward() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.peek() + if ch != u' ': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_tag_directive_prefix(self, start_mark): + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_directive_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpteted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. 
+ start_mark = self.get_mark() + indicator = self.peek() + if indicator == u'*': + name = 'alias' + else: + name = 'anchor' + self.forward() + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`': + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + end_mark = self.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # See the specification for details. + start_mark = self.get_mark() + ch = self.peek(1) + if ch == u'<': + handle = None + self.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if self.peek() != u'>': + raise ScannerError("while parsing a tag", start_mark, + "expected '>', but found %r" % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + elif ch in u'\0 \t\r\n\x85\u2028\u2029': + handle = None + suffix = u'!' + self.forward() + else: + length = 1 + use_handle = False + while ch not in u'\0 \r\n\x85\u2028\u2029': + if ch == u'!': + use_handle = True + break + length += 1 + ch = self.peek(length) + handle = u'!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = u'!' 
+ self.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a tag", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + value = (handle, suffix) + end_mark = self.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style): + # See the specification for details. + + if style == '>': + folded = True + else: + folded = False + + chunks = [] + start_mark = self.get_mark() + + # Scan the header. + self.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. + min_indent = self.indent+1 + if min_indent < 1: + min_indent = 1 + if increment is None: + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + indent = min_indent+increment-1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = u'' + + # Scan the inner part of the block scalar. + while self.column == indent and self.peek() != u'\0': + chunks.extend(breaks) + leading_non_space = self.peek() not in u' \t' + length = 0 + while self.peek(length) not in u'\0\r\n\x85\u2028\u2029': + length += 1 + chunks.append(self.prefix(length)) + self.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if self.column == indent and self.peek() != u'\0': + + # Unfortunately, folding rules are ambiguous. 
+ # + # This is the folding according to the specification: + + if folded and line_break == u'\n' \ + and leading_non_space and self.peek() not in u' \t': + if not breaks: + chunks.append(u' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + #if folded and line_break == u'\n': + # if not breaks: + # if self.peek() not in ' \t': + # chunks.append(u' ') + # else: + # chunks.append(line_break) + #else: + # chunks.append(line_break) + else: + break + + # Chomp the tail. + if chomping is not False: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + + # We are done. + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + def scan_block_scalar_indicators(self, start_mark): + # See the specification for details. + chomping = None + increment = None + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + elif ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected chomping or indentation indicators, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_block_scalar_indentation(self): + # See the specification for details. + chunks = [] + max_indent = 0 + end_mark = self.get_mark() + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() != u' ': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + else: + self.forward() + if self.column > max_indent: + max_indent = self.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # See the specification for details. + chunks = [] + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + while self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + return chunks, end_mark + + def scan_flow_scalar(self, style): + # See the specification for details. + # Note that we loose indentation rules for quoted scalars. Quoted + # scalars don't need to adhere indentation because " and ' clearly + # mark the beginning and the end of them. Therefore we are less + # restrictive then the specification requires. We only need to check + # that document separators are not included in scalars. 
+ if style == '"': + double = True + else: + double = False + chunks = [] + start_mark = self.get_mark() + quote = self.peek() + self.forward() + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + while self.peek() != quote: + chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + self.forward() + end_mark = self.get_mark() + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + ESCAPE_REPLACEMENTS = { + u'0': u'\0', + u'a': u'\x07', + u'b': u'\x08', + u't': u'\x09', + u'\t': u'\x09', + u'n': u'\x0A', + u'v': u'\x0B', + u'f': u'\x0C', + u'r': u'\x0D', + u'e': u'\x1B', + u' ': u'\x20', + u'\"': u'\"', + u'\\': u'\\', + u'N': u'\x85', + u'_': u'\xA0', + u'L': u'\u2028', + u'P': u'\u2029', + } + + ESCAPE_CODES = { + u'x': 2, + u'u': 4, + u'U': 8, + } + + def scan_flow_scalar_non_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + length = 0 + while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029': + length += 1 + if length: + chunks.append(self.prefix(length)) + self.forward(length) + ch = self.peek() + if not double and ch == u'\'' and self.peek(1) == u'\'': + chunks.append(u'\'') + self.forward(2) + elif (double and ch == u'\'') or (not double and ch in u'\"\\'): + chunks.append(ch) + self.forward() + elif double and ch == u'\\': + self.forward() + ch = self.peek() + if ch in self.ESCAPE_REPLACEMENTS: + chunks.append(self.ESCAPE_REPLACEMENTS[ch]) + self.forward() + elif ch in self.ESCAPE_CODES: + length = self.ESCAPE_CODES[ch] + self.forward() + for k in range(length): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "expected escape sequence of %d hexdecimal numbers, but found %r" % + (length, self.peek(k).encode('utf-8')), self.get_mark()) + code = int(self.prefix(length), 16) + 
chunks.append(unichr(code)) + self.forward(length) + elif ch in u'\r\n\x85\u2028\u2029': + self.scan_line_break() + chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) + else: + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark()) + else: + return chunks + + def scan_flow_scalar_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + length = 0 + while self.peek(length) in u' \t': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch == u'\0': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected end of stream", self.get_mark()) + elif ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + breaks = self.scan_flow_scalar_breaks(double, start_mark) + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + else: + chunks.append(whitespaces) + return chunks + + def scan_flow_scalar_breaks(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + # Instead of checking indentation, we check for document + # separators. + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected document separator", self.get_mark()) + while self.peek() in u' \t': + self.forward() + if self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + else: + return chunks + + def scan_plain(self): + # See the specification for details. + # We add an additional restriction for the flow context: + # plain scalars in the flow context cannot contain ',', ':' and '?'. + # We also keep track of the `allow_simple_key` flag here. + # Indentation rules are loosed for the flow context. 
+ chunks = [] + start_mark = self.get_mark() + end_mark = start_mark + indent = self.indent+1 + # We allow zero indentation for scalars, but then we need to check for + # document separators at the beginning of the line. + #if indent == 0: + # indent = 1 + spaces = [] + while True: + length = 0 + if self.peek() == u'#': + break + while True: + ch = self.peek(length) + if ch in u'\0 \t\r\n\x85\u2028\u2029' \ + or (not self.flow_level and ch == u':' and + self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \ + or (self.flow_level and ch in u',:?[]{}'): + break + length += 1 + # It's not clear what we should do with ':' in the flow context. + if (self.flow_level and ch == u':' + and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'): + self.forward(length) + raise ScannerError("while scanning a plain scalar", start_mark, + "found unexpected ':'", self.get_mark(), + "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.") + if length == 0: + break + self.allow_simple_key = False + chunks.extend(spaces) + chunks.append(self.prefix(length)) + self.forward(length) + end_mark = self.get_mark() + spaces = self.scan_plain_spaces(indent, start_mark) + if not spaces or self.peek() == u'#' \ + or (not self.flow_level and self.column < indent): + break + return ScalarToken(u''.join(chunks), True, start_mark, end_mark) + + def scan_plain_spaces(self, indent, start_mark): + # See the specification for details. + # The specification is really confusing about tabs in plain scalars. + # We just forbid them completely. Do not use tabs in YAML! 
+ chunks = [] + length = 0 + while self.peek(length) in u' ': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + self.allow_simple_key = True + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + breaks = [] + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() == ' ': + self.forward() + else: + breaks.append(self.scan_line_break()) + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + elif whitespaces: + chunks.append(whitespaces) + return chunks + + def scan_tag_handle(self, name, start_mark): + # See the specification for details. + # For some strange reasons, the specification does not allow '_' in + # tag handles. I have allowed it anyway. + ch = self.peek() + if ch != u'!': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 1 + ch = self.peek(length) + if ch != u' ': + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if ch != u'!': + self.forward(length) + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length += 1 + value = self.prefix(length) + self.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # See the specification for details. + # Note: we do not check if URI is well-formed. 
+ chunks = [] + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.!~*\'()[]%': + if ch == u'%': + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = self.peek(length) + if length: + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + if not chunks: + raise ScannerError("while parsing a %s" % name, start_mark, + "expected URI, but found %r" % ch.encode('utf-8'), + self.get_mark()) + return u''.join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # See the specification for details. + bytes = [] + mark = self.get_mark() + while self.peek() == u'%': + self.forward() + for k in range(2): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected URI escape sequence of 2 hexdecimal numbers, but found %r" % + (self.peek(k).encode('utf-8')), self.get_mark()) + bytes.append(chr(int(self.prefix(2), 16))) + self.forward(2) + try: + value = unicode(''.join(bytes), 'utf-8') + except UnicodeDecodeError, exc: + raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) + return value + + def scan_line_break(self): + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.peek() + if ch in u'\r\n\x85': + if self.prefix(2) == u'\r\n': + self.forward(2) + else: + self.forward() + return u'\n' + elif ch in u'\u2028\u2029': + self.forward() + return ch + return u'' + +#try: +# import psyco +# psyco.bind(Scanner) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/10.0.0/yaml/serializer.py b/scripts/clang-tidy/10.0.0/yaml/serializer.py new file mode 100644 index 000000000..0bf1e96dc --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/serializer.py @@ -0,0 +1,111 @@ + 
+__all__ = ['Serializer', 'SerializerError'] + +from error import YAMLError +from events import * +from nodes import * + +class SerializerError(YAMLError): + pass + +class Serializer(object): + + ANCHOR_TEMPLATE = u'id%03d' + + def __init__(self, encoding=None, + explicit_start=None, explicit_end=None, version=None, tags=None): + self.use_encoding = encoding + self.use_explicit_start = explicit_start + self.use_explicit_end = explicit_end + self.use_version = version + self.use_tags = tags + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + self.closed = None + + def open(self): + if self.closed is None: + self.emit(StreamStartEvent(encoding=self.use_encoding)) + self.closed = False + elif self.closed: + raise SerializerError("serializer is closed") + else: + raise SerializerError("serializer is already opened") + + def close(self): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif not self.closed: + self.emit(StreamEndEvent()) + self.closed = True + + #def __del__(self): + # self.close() + + def serialize(self, node): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif self.closed: + raise SerializerError("serializer is closed") + self.emit(DocumentStartEvent(explicit=self.use_explicit_start, + version=self.use_version, tags=self.use_tags)) + self.anchor_node(node) + self.serialize_node(node, None, None) + self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + + def anchor_node(self, node): + if node in self.anchors: + if self.anchors[node] is None: + self.anchors[node] = self.generate_anchor(node) + else: + self.anchors[node] = None + if isinstance(node, SequenceNode): + for item in node.value: + self.anchor_node(item) + elif isinstance(node, MappingNode): + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) + + def generate_anchor(self, node): + 
self.last_anchor_id += 1 + return self.ANCHOR_TEMPLATE % self.last_anchor_id + + def serialize_node(self, node, parent, index): + alias = self.anchors[node] + if node in self.serialized_nodes: + self.emit(AliasEvent(alias)) + else: + self.serialized_nodes[node] = True + self.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + detected_tag = self.resolve(ScalarNode, node.value, (True, False)) + default_tag = self.resolve(ScalarNode, node.value, (False, True)) + implicit = (node.tag == detected_tag), (node.tag == default_tag) + self.emit(ScalarEvent(alias, node.tag, implicit, node.value, + style=node.style)) + elif isinstance(node, SequenceNode): + implicit = (node.tag + == self.resolve(SequenceNode, node.value, True)) + self.emit(SequenceStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emit(SequenceEndEvent()) + elif isinstance(node, MappingNode): + implicit = (node.tag + == self.resolve(MappingNode, node.value, True)) + self.emit(MappingStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + self.emit(MappingEndEvent()) + self.ascend_resolver() + diff --git a/scripts/clang-tidy/10.0.0/yaml/tokens.py b/scripts/clang-tidy/10.0.0/yaml/tokens.py new file mode 100644 index 000000000..4d0b48a39 --- /dev/null +++ b/scripts/clang-tidy/10.0.0/yaml/tokens.py @@ -0,0 +1,104 @@ + +class Token(object): + def __init__(self, start_mark, end_mark): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in self.__dict__ + if not key.endswith('_mark')] + attributes.sort() + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +#class BOMToken(Token): +# id = '' + +class 
DirectiveToken(Token): + id = '' + def __init__(self, name, value, start_mark, end_mark): + self.name = name + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class DocumentStartToken(Token): + id = '' + +class DocumentEndToken(Token): + id = '' + +class StreamStartToken(Token): + id = '' + def __init__(self, start_mark=None, end_mark=None, + encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndToken(Token): + id = '' + +class BlockSequenceStartToken(Token): + id = '' + +class BlockMappingStartToken(Token): + id = '' + +class BlockEndToken(Token): + id = '' + +class FlowSequenceStartToken(Token): + id = '[' + +class FlowMappingStartToken(Token): + id = '{' + +class FlowSequenceEndToken(Token): + id = ']' + +class FlowMappingEndToken(Token): + id = '}' + +class KeyToken(Token): + id = '?' + +class ValueToken(Token): + id = ':' + +class BlockEntryToken(Token): + id = '-' + +class FlowEntryToken(Token): + id = ',' + +class AliasToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class AnchorToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class TagToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class ScalarToken(Token): + id = '' + def __init__(self, value, plain, start_mark, end_mark, style=None): + self.value = value + self.plain = plain + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + diff --git a/scripts/clang-tidy/11.0.0/.travis.yml b/scripts/clang-tidy/11.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang-tidy/11.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + 
+matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang-tidy/11.0.0/README-yaml.md b/scripts/clang-tidy/11.0.0/README-yaml.md new file mode 100644 index 000000000..2cc738ab7 --- /dev/null +++ b/scripts/clang-tidy/11.0.0/README-yaml.md @@ -0,0 +1,13 @@ +This is a copy of `pyyaml-3.12` vendored on april 24, 2018 by @springmeyer. + +https://github.com/mapbox/mason/issues/563 documents why. + +The process to vendor was: + +``` +cd mason +pip install pyyaml --user +cp $(python -m site --user-site)/yaml scripts/clang-tidy/6.0.0/ +``` + +Then the `clang-tidy` package was built and the `yaml` directory was copied beside the `share/run-clang-tidy.py` script (which depends on it). \ No newline at end of file diff --git a/scripts/clang-tidy/11.0.0/script.sh b/scripts/clang-tidy/11.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang-tidy/11.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang-tidy/11.0.0/yaml/__init__.py b/scripts/clang-tidy/11.0.0/yaml/__init__.py new file mode 100644 index 000000000..87c15d38a --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/__init__.py @@ -0,0 +1,315 @@ + +from error import * + +from tokens import * +from events import * +from nodes import * + +from loader import * +from dumper import * + +__version__ = '3.12' + +try: + from cyaml import * + __with_libyaml__ = True +except ImportError: + __with_libyaml__ = False + +def scan(stream, Loader=Loader): + """ + Scan a YAML stream and produce scanning tokens. + """ + loader = Loader(stream) + try: + while loader.check_token(): + yield loader.get_token() + finally: + loader.dispose() + +def parse(stream, Loader=Loader): + """ + Parse a YAML stream and produce parsing events. + """ + loader = Loader(stream) + try: + while loader.check_event(): + yield loader.get_event() + finally: + loader.dispose() + +def compose(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + +def compose_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + loader = Loader(stream) + try: + while loader.check_node(): + yield loader.get_node() + finally: + loader.dispose() + +def load(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + loader = Loader(stream) + try: + return loader.get_single_data() + finally: + loader.dispose() + +def load_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. 
+ """ + loader = Loader(stream) + try: + while loader.check_data(): + yield loader.get_data() + finally: + loader.dispose() + +def safe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + return load(stream, SafeLoader) + +def safe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + return load_all(stream, SafeLoader) + +def emit(events, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + from StringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + try: + for event in events: + dumper.emit(event) + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize_all(nodes, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for node in nodes: + dumper.serialize(node) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + +def dump_all(documents, stream=None, Dumper=Dumper, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for data in documents: + dumper.represent(data) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def dump(data, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a Python object into a YAML stream. 
+ If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=Dumper, **kwds) + +def safe_dump_all(documents, stream=None, **kwds): + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + +def safe_dump(data, stream=None, **kwds): + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + +def add_implicit_resolver(tag, regexp, first=None, + Loader=Loader, Dumper=Dumper): + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + Loader.add_implicit_resolver(tag, regexp, first) + Dumper.add_implicit_resolver(tag, regexp, first) + +def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): + """ + Add a path based resolver for the given tag. + A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. + """ + Loader.add_path_resolver(tag, path, kind) + Dumper.add_path_resolver(tag, path, kind) + +def add_constructor(tag, constructor, Loader=Loader): + """ + Add a constructor for the given tag. + Constructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. + """ + Loader.add_constructor(tag, constructor) + +def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. 
+ Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. + """ + Loader.add_multi_constructor(tag_prefix, multi_constructor) + +def add_representer(data_type, representer, Dumper=Dumper): + """ + Add a representer for the given type. + Representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. + """ + Dumper.add_representer(data_type, representer) + +def add_multi_representer(data_type, multi_representer, Dumper=Dumper): + """ + Add a representer for the given type. + Multi-representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. + """ + Dumper.add_multi_representer(data_type, multi_representer) + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. + """ + def __init__(cls, name, bases, kwds): + super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + cls.yaml_dumper.add_representer(cls, cls.to_yaml) + +class YAMLObject(object): + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. + """ + + __metaclass__ = YAMLObjectMetaclass + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_loader = Loader + yaml_dumper = Dumper + + yaml_tag = None + yaml_flow_style = None + + def from_yaml(cls, loader, node): + """ + Convert a representation node to a Python object. + """ + return loader.construct_yaml_object(node, cls) + from_yaml = classmethod(from_yaml) + + def to_yaml(cls, dumper, data): + """ + Convert a Python object to a representation node. 
+ """ + return dumper.represent_yaml_object(cls.yaml_tag, data, cls, + flow_style=cls.yaml_flow_style) + to_yaml = classmethod(to_yaml) + diff --git a/scripts/clang-tidy/11.0.0/yaml/composer.py b/scripts/clang-tidy/11.0.0/yaml/composer.py new file mode 100644 index 000000000..06e5ac782 --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/composer.py @@ -0,0 +1,139 @@ + +__all__ = ['Composer', 'ComposerError'] + +from error import MarkedYAMLError +from events import * +from nodes import * + +class ComposerError(MarkedYAMLError): + pass + +class Composer(object): + + def __init__(self): + self.anchors = {} + + def check_node(self): + # Drop the STREAM-START event. + if self.check_event(StreamStartEvent): + self.get_event() + + # If there are more documents available? + return not self.check_event(StreamEndEvent) + + def get_node(self): + # Get the root node of the next document. + if not self.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # Drop the STREAM-START event. + self.get_event() + + # Compose a document if the stream is not empty. + document = None + if not self.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.check_event(StreamEndEvent): + event = self.get_event() + raise ComposerError("expected a single document in the stream", + document.start_mark, "but found another document", + event.start_mark) + + # Drop the STREAM-END event. + self.get_event() + + return document + + def compose_document(self): + # Drop the DOCUMENT-START event. + self.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. 
+ self.get_event() + + self.anchors = {} + return node + + def compose_node(self, parent, index): + if self.check_event(AliasEvent): + event = self.get_event() + anchor = event.anchor + if anchor not in self.anchors: + raise ComposerError(None, None, "found undefined alias %r" + % anchor.encode('utf-8'), event.start_mark) + return self.anchors[anchor] + event = self.peek_event() + anchor = event.anchor + if anchor is not None: + if anchor in self.anchors: + raise ComposerError("found duplicate anchor %r; first occurence" + % anchor.encode('utf-8'), self.anchors[anchor].start_mark, + "second occurence", event.start_mark) + self.descend_resolver(parent, index) + if self.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + event = self.get_event() + tag = event.tag + if tag is None or tag == u'!': + tag = self.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode(tag, event.value, + event.start_mark, event.end_mark, style=event.style) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + + def compose_mapping_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': 
+ tag = self.resolve(MappingNode, None, start_event.implicit) + node = MappingNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + while not self.check_event(MappingEndEvent): + #key_event = self.peek_event() + item_key = self.compose_node(node, None) + #if item_key in node.value: + # raise ComposerError("while composing a mapping", start_event.start_mark, + # "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + #node.value[item_key] = item_value + node.value.append((item_key, item_value)) + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + diff --git a/scripts/clang-tidy/11.0.0/yaml/constructor.py b/scripts/clang-tidy/11.0.0/yaml/constructor.py new file mode 100644 index 000000000..635faac3e --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/constructor.py @@ -0,0 +1,675 @@ + +__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', + 'ConstructorError'] + +from error import * +from nodes import * + +import datetime + +import binascii, re, sys, types + +class ConstructorError(MarkedYAMLError): + pass + +class BaseConstructor(object): + + yaml_constructors = {} + yaml_multi_constructors = {} + + def __init__(self): + self.constructed_objects = {} + self.recursive_objects = {} + self.state_generators = [] + self.deep_construct = False + + def check_data(self): + # If there are more documents available? + return self.check_node() + + def get_data(self): + # Construct and return the next document. + if self.check_node(): + return self.construct_document(self.get_node()) + + def get_single_data(self): + # Ensure that the stream contains a single document and construct it. 
+ node = self.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + data = self.construct_object(node) + while self.state_generators: + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + raise ConstructorError(None, None, + "found unconstructable recursive node", node.start_mark) + self.recursive_objects[node] = None + constructor = None + tag_suffix = None + if node.tag in self.yaml_constructors: + constructor = self.yaml_constructors[node.tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if node.tag.startswith(tag_prefix): + tag_suffix = node.tag[len(tag_prefix):] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = node.tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = generator.next() + if self.deep_construct: + for dummy in generator: + pass + else: + self.state_generators.append(generator) + self.constructed_objects[node] = data + del 
self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_scalar(self, node): + if not isinstance(node, ScalarNode): + raise ConstructorError(None, None, + "expected a scalar node, but found %s" % node.id, + node.start_mark) + return node.value + + def construct_sequence(self, node, deep=False): + if not isinstance(node, SequenceNode): + raise ConstructorError(None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + return [self.construct_object(child, deep=deep) + for child in node.value] + + def construct_mapping(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except TypeError, exc: + raise ConstructorError("while constructing a mapping", node.start_mark, + "found unacceptable key (%s)" % exc, key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + def construct_pairs(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + def add_constructor(cls, tag, constructor): + if not 'yaml_constructors' in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + add_constructor = classmethod(add_constructor) + + def add_multi_constructor(cls, tag_prefix, multi_constructor): + if not 'yaml_multi_constructors' in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + 
cls.yaml_multi_constructors[tag_prefix] = multi_constructor + add_multi_constructor = classmethod(add_multi_constructor) + +class SafeConstructor(BaseConstructor): + + def construct_scalar(self, node): + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + + def construct_yaml_null(self, node): + self.construct_scalar(node) + return None + + bool_values = { + u'yes': True, + u'no': False, + u'true': True, + u'false': False, + u'on': True, + u'off': False, + } + + def construct_yaml_bool(self, node): + value = 
self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '') + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '0': + return 0 + elif value.startswith('0b'): + return sign*int(value[2:], 2) + elif value.startswith('0x'): + return sign*int(value[2:], 16) + elif value[0] == '0': + return sign*int(value, 8) + elif ':' in value: + digits = [int(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*int(value) + + inf_value = 1e300 + while inf_value != inf_value*inf_value: + inf_value *= inf_value + nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). + + def construct_yaml_float(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '').lower() + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '.inf': + return sign*self.inf_value + elif value == '.nan': + return self.nan_value + elif ':' in value: + digits = [float(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*float(value) + + def construct_yaml_binary(self, node): + value = self.construct_scalar(node) + try: + return str(value).decode('base64') + except (binascii.Error, UnicodeEncodeError), exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + timestamp_regexp = re.compile( + ur'''^(?P[0-9][0-9][0-9][0-9]) + -(?P[0-9][0-9]?) + -(?P[0-9][0-9]?) + (?:(?:[Tt]|[ \t]+) + (?P[0-9][0-9]?) + :(?P[0-9][0-9]) + :(?P[0-9][0-9]) + (?:\.(?P[0-9]*))? + (?:[ \t]*(?PZ|(?P[-+])(?P[0-9][0-9]?) 
+ (?::(?P[0-9][0-9]))?))?)?$''', re.X) + + def construct_yaml_timestamp(self, node): + value = self.construct_scalar(node) + match = self.timestamp_regexp.match(node.value) + values = match.groupdict() + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + if not values['hour']: + return datetime.date(year, month, day) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction = values['fraction'][:6] + while len(fraction) < 6: + fraction += '0' + fraction = int(fraction) + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + tz_minute = int(values['tz_minute'] or 0) + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data + + def construct_yaml_omap(self, node): + # Note: we do not check for duplicate keys, because it's too + # CPU-expensive. + omap = [] + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + omap.append((key, value)) + + def construct_yaml_pairs(self, node): + # Note: the same code as `construct_yaml_omap`. 
+ pairs = [] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + data = set() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + value = self.construct_scalar(node) + try: + return value.encode('ascii') + except UnicodeEncodeError: + return value + + def construct_yaml_seq(self, node): + data = [] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + data = {} + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + raise ConstructorError(None, None, + "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'), + node.start_mark) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:null', + SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:bool', + SafeConstructor.construct_yaml_bool) + 
+SafeConstructor.add_constructor( + u'tag:yaml.org,2002:int', + SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:float', + SafeConstructor.construct_yaml_float) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:binary', + SafeConstructor.construct_yaml_binary) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:omap', + SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', + SafeConstructor.construct_yaml_pairs) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:set', + SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:str', + SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:seq', + SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:map', + SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, + SafeConstructor.construct_undefined) + +class Constructor(SafeConstructor): + + def construct_python_str(self, node): + return self.construct_scalar(node).encode('utf-8') + + def construct_python_unicode(self, node): + return self.construct_scalar(node) + + def construct_python_long(self, node): + return long(self.construct_yaml_int(node)) + + def construct_python_complex(self, node): + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + return tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python module", mark, + "expected non-empty name appended to the tag", mark) + try: + __import__(name) + except ImportError, exc: + raise ConstructorError("while constructing a Python module", mark, + "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark) + return 
sys.modules[name] + + def find_python_name(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python object", mark, + "expected non-empty name appended to the tag", mark) + if u'.' in name: + module_name, object_name = name.rsplit('.', 1) + else: + module_name = '__builtin__' + object_name = name + try: + __import__(module_name) + except ImportError, exc: + raise ConstructorError("while constructing a Python object", mark, + "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark) + module = sys.modules[module_name] + if not hasattr(module, object_name): + raise ConstructorError("while constructing a Python object", mark, + "cannot find %r in the module %r" % (object_name.encode('utf-8'), + module.__name__), mark) + return getattr(module, object_name) + + def construct_python_name(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python name", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python module", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_module(suffix, node.start_mark) + + class classobj: pass + + def make_python_instance(self, suffix, node, + args=None, kwds=None, newobj=False): + if not args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if newobj and isinstance(cls, type(self.classobj)) \ + and not args and not kwds: + instance = self.classobj() + instance.__class__ = cls + return instance + elif newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, 
instance, state): + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(object, key, value) + + def construct_python_object(self, suffix, node): + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. 
+ if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} + state = {} + listitems = [] + dictitems = {} + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if state: + self.set_python_instance_state(instance, state) + if listitems: + instance.extend(listitems) + if dictitems: + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + return self.construct_python_object_apply(suffix, node, newobj=True) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/none', + Constructor.construct_yaml_null) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/bool', + Constructor.construct_yaml_bool) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/str', + Constructor.construct_python_str) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/unicode', + Constructor.construct_python_unicode) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/int', + Constructor.construct_yaml_int) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/long', + Constructor.construct_python_long) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/float', + Constructor.construct_yaml_float) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/complex', + Constructor.construct_python_complex) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/list', + Constructor.construct_yaml_seq) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/tuple', + Constructor.construct_python_tuple) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/dict', + Constructor.construct_yaml_map) + +Constructor.add_multi_constructor( + 
u'tag:yaml.org,2002:python/name:', + Constructor.construct_python_name) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/module:', + Constructor.construct_python_module) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object:', + Constructor.construct_python_object) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/apply:', + Constructor.construct_python_object_apply) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/new:', + Constructor.construct_python_object_new) + diff --git a/scripts/clang-tidy/11.0.0/yaml/cyaml.py b/scripts/clang-tidy/11.0.0/yaml/cyaml.py new file mode 100644 index 000000000..68dcd7519 --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/cyaml.py @@ -0,0 +1,85 @@ + +__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', + 'CBaseDumper', 'CSafeDumper', 'CDumper'] + +from _yaml import CParser, CEmitter + +from constructor import * + +from serializer import * +from representer import * + +from resolver import * + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class CSafeLoader(CParser, SafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class CLoader(CParser, Constructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + Constructor.__init__(self) + Resolver.__init__(self) + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + 
allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CDumper(CEmitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/11.0.0/yaml/dumper.py b/scripts/clang-tidy/11.0.0/yaml/dumper.py new file mode 100644 index 000000000..f811d2c91 --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/dumper.py @@ -0,0 +1,62 @@ + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] + +from emitter import * +from serializer import * +from representer import * +from resolver 
import * + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class Dumper(Emitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + 
version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/11.0.0/yaml/emitter.py b/scripts/clang-tidy/11.0.0/yaml/emitter.py new file mode 100644 index 000000000..e5bcdcccb --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/emitter.py @@ -0,0 +1,1140 @@ + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +__all__ = ['Emitter', 'EmitterError'] + +from error import YAMLError +from events import * + +class EmitterError(YAMLError): + pass + +class ScalarAnalysis(object): + def __init__(self, scalar, empty, multiline, + allow_flow_plain, allow_block_plain, + allow_single_quoted, allow_double_quoted, + allow_block): + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + +class Emitter(object): + + DEFAULT_TAG_PREFIXES = { + u'!' : u'!', + u'tag:yaml.org,2002:' : u'!!', + } + + def __init__(self, stream, canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + + # The stream should have the methods `write` and possibly `flush`. + self.stream = stream + + # Encoding can be overriden by STREAM-START. + self.encoding = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] + self.state = self.expect_stream_start + + # Current event and the event queue. + self.events = [] + self.event = None + + # The current indentation level and the stack of previous indents. 
+ self.indents = [] + self.indent = None + + # Flow level. + self.flow_level = 0 + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + + # Whether the document requires an explicit document indicator + self.open_ended = False + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + self.best_indent = 2 + if indent and 1 < indent < 10: + self.best_indent = indent + self.best_width = 80 + if width and width > self.best_indent*2: + self.best_width = width + self.best_line_break = u'\n' + if line_break in [u'\r', u'\n', u'\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None + + # Prepared anchor and tag. + self.prepared_anchor = None + self.prepared_tag = None + + # Scalar analysis and style. + self.analysis = None + self.style = None + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. 
+ + def need_more_events(self): + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return (len(self.events) < count+1) + + def increase_indent(self, flow=False, indentless=False): + self.indents.append(self.indent) + if self.indent is None: + if flow: + self.indent = self.best_indent + else: + self.indent = 0 + elif not indentless: + self.indent += self.best_indent + + # States. + + # Stream handlers. + + def expect_stream_start(self): + if isinstance(self.event, StreamStartEvent): + if self.event.encoding and not getattr(self.stream, 'encoding', None): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError("expected StreamStartEvent, but got %s" + % self.event) + + def expect_nothing(self): + raise EmitterError("expected nothing, but got %s" % self.event) + + # Document handlers. 
+ + def expect_first_document_start(self): + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = self.event.tags.keys() + handles.sort() + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = (first and not self.event.explicit and not self.canonical + and not self.event.version and not self.event.tags + and not self.check_empty_document()) + if not implicit: + self.write_indent() + self.write_indicator(u'---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError("expected DocumentStartEvent, but got %s" + % self.event) + + def expect_document_end(self): + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator(u'...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError("expected DocumentEndEvent, but got %s" + % self.event) + + def expect_document_root(self): + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. 
+ + def expect_node(self, root=False, sequence=False, mapping=False, + simple_key=False): + self.root_context = root + self.sequence_context = sequence + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + self.process_anchor(u'&') + self.process_tag() + if isinstance(self.event, ScalarEvent): + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_sequence(): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_mapping(): + self.expect_flow_mapping() + else: + self.expect_block_mapping() + else: + raise EmitterError("expected NodeEvent, but got %s" % self.event) + + def expect_alias(self): + if self.event.anchor is None: + raise EmitterError("anchor is not specified for alias") + self.process_anchor(u'*') + self.state = self.states.pop() + + def expect_scalar(self): + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
+ + def expect_flow_sequence(self): + self.write_indicator(u'[', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. 
+ + def expect_flow_mapping(self): + self.write_indicator(u'{', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(u':', True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. 
+ + def expect_block_sequence(self): + indentless = (self.mapping_context and not self.indention) + self.increase_indent(flow=False, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + if not first and isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + self.write_indicator(u'-', True, indention=True) + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. + + def expect_block_mapping(self): + self.increase_indent(flow=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + if not first and isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + if self.check_simple_key(): + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + self.write_indent() + self.write_indicator(u':', True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. 
+ + def check_empty_sequence(self): + return (isinstance(self.event, SequenceStartEvent) and self.events + and isinstance(self.events[0], SequenceEndEvent)) + + def check_empty_mapping(self): + return (isinstance(self.event, MappingStartEvent) and self.events + and isinstance(self.events[0], MappingEndEvent)) + + def check_empty_document(self): + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return (isinstance(event, ScalarEvent) and event.anchor is None + and event.tag is None and event.implicit and event.value == u'') + + def check_simple_key(self): + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ + and self.event.tag is not None: + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return (length < 128 and (isinstance(self.event, AliasEvent) + or (isinstance(self.event, ScalarEvent) + and not self.analysis.empty and not self.analysis.multiline) + or self.check_empty_sequence() or self.check_empty_mapping())) + + # Anchor, Tag, and Scalar processors. 
+ + def process_anchor(self, indicator): + if self.event.anchor is None: + self.prepared_anchor = None + return + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator+self.prepared_anchor, True) + self.prepared_anchor = None + + def process_tag(self): + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ((not self.canonical or tag is None) and + ((self.style == '' and self.event.implicit[0]) + or (self.style != '' and self.event.implicit[1]))): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = u'!' + self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError("tag is not specified") + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + self.prepared_tag = None + + def choose_scalar_style(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if not self.event.style and self.event.implicit[0]: + if (not (self.simple_key_context and + (self.analysis.empty or self.analysis.multiline)) + and (self.flow_level and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain))): + return '' + if self.event.style and self.event.style in '|>': + if (not self.flow_level and not self.simple_key_context + and self.analysis.allow_block): + return self.event.style + if not self.event.style or self.event.style == '\'': + if (self.analysis.allow_single_quoted and + not (self.simple_key_context and self.analysis.multiline)): + return '\'' + return '"' + + def process_scalar(self): + if self.analysis is None: + self.analysis 
= self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = (not self.simple_key_context) + #if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == '\'': + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + + # Analyzers. + + def prepare_version(self, version): + major, minor = version + if major != 1: + raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) + return u'%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + if not handle: + raise EmitterError("tag handle must not be empty") + if handle[0] != u'!' 
or handle[-1] != u'!': + raise EmitterError("tag handle must start and end with '!': %r" + % (handle.encode('utf-8'))) + for ch in handle[1:-1]: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the tag handle: %r" + % (ch.encode('utf-8'), handle.encode('utf-8'))) + return handle + + def prepare_tag_prefix(self, prefix): + if not prefix: + raise EmitterError("tag prefix must not be empty") + chunks = [] + start = end = 0 + if prefix[0] == u'!': + end = 1 + while end < len(prefix): + ch = prefix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?!:@&=+$,_.~*\'()[]': + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(prefix[start:end]) + return u''.join(chunks) + + def prepare_tag(self, tag): + if not tag: + raise EmitterError("tag must not be empty") + if tag == u'!': + return tag + handle = None + suffix = tag + prefixes = self.tag_prefixes.keys() + prefixes.sort() + for prefix in prefixes: + if tag.startswith(prefix) \ + and (prefix == u'!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix):] + chunks = [] + start = end = 0 + while end < len(suffix): + ch = suffix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.~*\'()[]' \ + or (ch == u'!' 
and handle != u'!'): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = u''.join(chunks) + if handle: + return u'%s%s' % (handle, suffix_text) + else: + return u'!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + if not anchor: + raise EmitterError("anchor must not be empty") + for ch in anchor: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the anchor: %r" + % (ch.encode('utf-8'), anchor.encode('utf-8'))) + return anchor + + def analyze_scalar(self, scalar): + + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, + allow_flow_plain=False, allow_block_plain=True, + allow_single_quoted=True, allow_double_quoted=True, + allow_block=False) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith(u'---') or scalar.startswith(u'...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceeded_by_whitespace = True + + # Last character or followed by a whitespace. + followed_by_whitespace = (len(scalar) == 1 or + scalar[1] in u'\0 \t\r\n\x85\u2028\u2029') + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. 
+ if ch in u'#,[]{}&*!|>\'\"%@`': + flow_indicators = True + block_indicators = True + if ch in u'?:': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in u',?[]{}': + flow_indicators = True + if ch == u':': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'#' and preceeded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in u'\n\x85\u2028\u2029': + line_breaks = True + if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): + if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': + unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. + if ch == u' ': + if index == 0: + leading_space = True + if index == len(scalar)-1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in u'\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar)-1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029') + followed_by_whitespace = (index+1 >= len(scalar) or + scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029') + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. 
+ if (leading_space or leading_break + or trailing_space or trailing_break): + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if space_break or special_characters: + allow_flow_plain = allow_block_plain = \ + allow_single_quoted = allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis(scalar=scalar, + empty=False, multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block) + + # Writers. + + def flush_stream(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # Write BOM if needed. 
+ if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write(u'\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, + whitespace=False, indention=False): + if self.whitespace or not need_whitespace: + data = indicator + else: + data = u' '+indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + indent = self.indent or 0 + if not self.indention or self.column > indent \ + or (self.column == indent and not self.whitespace): + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = u' '*(indent-self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + data = u'%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + data = u'%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. 
+ + def write_single_quoted(self, text, split=True): + self.write_indicator(u'\'', True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != u' ': + if start+1 == end and self.column > self.best_width and split \ + and start != 0 and end != len(text): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'': + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == u'\'': + data = u'\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + self.write_indicator(u'\'', False) + + ESCAPE_REPLACEMENTS = { + u'\0': u'0', + u'\x07': u'a', + u'\x08': u'b', + u'\x09': u't', + u'\x0A': u'n', + u'\x0B': u'v', + u'\x0C': u'f', + u'\x0D': u'r', + u'\x1B': u'e', + u'\"': u'\"', + u'\\': u'\\', + u'\x85': u'N', + u'\xA0': u'_', + u'\u2028': u'L', + u'\u2029': u'P', + } + + def write_double_quoted(self, text, split=True): + self.write_indicator(u'"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \ + or not (u'\x20' <= ch <= u'\x7E' + or (self.allow_unicode + and (u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD'))): + if 
start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = u'\\'+self.ESCAPE_REPLACEMENTS[ch] + elif ch <= u'\xFF': + data = u'\\x%02X' % ord(ch) + elif ch <= u'\uFFFF': + data = u'\\u%04X' % ord(ch) + else: + data = u'\\U%08X' % ord(ch) + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end+1 + if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \ + and self.column+(end-start) > self.best_width and split: + data = text[start:end]+u'\\' + if start < end: + start = end + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == u' ': + data = u'\\' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator(u'"', False) + + def determine_block_hints(self, text): + hints = u'' + if text: + if text[0] in u' \n\x85\u2028\u2029': + hints += unicode(self.best_indent) + if text[-1] not in u'\n\x85\u2028\u2029': + hints += u'-' + elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029': + hints += u'+' + return hints + + def write_folded(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'>'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if not leading_space and ch is not None and ch != u' ' \ + and text[start] == u'\n': + self.write_line_break() + leading_space = (ch == u' ') + for br in text[start:end]: + if br == u'\n': + 
self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + spaces = (ch == u' ') + end += 1 + + def write_literal(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'|'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + else: + if ch is None or ch in u'\n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + + def write_plain(self, text, split=True): + if self.root_context: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = u' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = 
text[end] + if spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + diff --git a/scripts/clang-tidy/11.0.0/yaml/error.py b/scripts/clang-tidy/11.0.0/yaml/error.py new file mode 100644 index 000000000..577686db5 --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/error.py @@ -0,0 +1,75 @@ + +__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] + +class Mark(object): + + def __init__(self, name, index, line, column, buffer, pointer): + self.name = name + self.index = index + self.line = line + self.column = column + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + if self.buffer is None: + return None + head = '' + start = self.pointer + while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer-start > max_length/2-1: + head = ' ... ' + start += 5 + break + tail = '' + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': + end += 1 + if end-self.pointer > max_length/2-1: + tail = ' ... 
' + end -= 5 + break + snippet = self.buffer[start:end].encode('utf-8') + return ' '*indent + head + snippet + tail + '\n' \ + + ' '*(indent+self.pointer-start+len(head)) + '^' + + def __str__(self): + snippet = self.get_snippet() + where = " in \"%s\", line %d, column %d" \ + % (self.name, self.line+1, self.column+1) + if snippet is not None: + where += ":\n"+snippet + return where + +class YAMLError(Exception): + pass + +class MarkedYAMLError(YAMLError): + + def __init__(self, context=None, context_mark=None, + problem=None, problem_mark=None, note=None): + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + + def __str__(self): + lines = [] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None \ + and (self.problem is None or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None: + lines.append(self.note) + return '\n'.join(lines) + diff --git a/scripts/clang-tidy/11.0.0/yaml/events.py b/scripts/clang-tidy/11.0.0/yaml/events.py new file mode 100644 index 000000000..f79ad389c --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/events.py @@ -0,0 +1,86 @@ + +# Abstract classes. 
+ +class Event(object): + def __init__(self, start_mark=None, end_mark=None): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] + if hasattr(self, key)] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +class NodeEvent(Event): + def __init__(self, anchor, start_mark=None, end_mark=None): + self.anchor = anchor + self.start_mark = start_mark + self.end_mark = end_mark + +class CollectionStartEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, + flow_style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class CollectionEndEvent(Event): + pass + +# Implementations. + +class StreamStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndEvent(Event): + pass + +class DocumentStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None, version=None, tags=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + self.version = version + self.tags = tags + +class DocumentEndEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + +class AliasEvent(NodeEvent): + pass + +class ScalarEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, value, + start_mark=None, end_mark=None, style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class SequenceStartEvent(CollectionStartEvent): + pass 
+ +class SequenceEndEvent(CollectionEndEvent): + pass + +class MappingStartEvent(CollectionStartEvent): + pass + +class MappingEndEvent(CollectionEndEvent): + pass + diff --git a/scripts/clang-tidy/11.0.0/yaml/loader.py b/scripts/clang-tidy/11.0.0/yaml/loader.py new file mode 100644 index 000000000..293ff467b --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/loader.py @@ -0,0 +1,40 @@ + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] + +from reader import * +from scanner import * +from parser import * +from composer import * +from constructor import * +from resolver import * + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/11.0.0/yaml/nodes.py b/scripts/clang-tidy/11.0.0/yaml/nodes.py new file mode 100644 index 000000000..c4f070c41 --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/nodes.py @@ -0,0 +1,49 @@ + +class Node(object): + def __init__(self, tag, value, start_mark, end_mark): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + value = self.value + #if isinstance(value, list): + # if len(value) == 0: + # value = '' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # 
value = '<%d items>' % len(value) + #else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... ') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + +class ScalarNode(Node): + id = 'scalar' + def __init__(self, tag, value, + start_mark=None, end_mark=None, style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class CollectionNode(Node): + def __init__(self, tag, value, + start_mark=None, end_mark=None, flow_style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class SequenceNode(CollectionNode): + id = 'sequence' + +class MappingNode(CollectionNode): + id = 'mapping' + diff --git a/scripts/clang-tidy/11.0.0/yaml/parser.py b/scripts/clang-tidy/11.0.0/yaml/parser.py new file mode 100644 index 000000000..f9e3057f3 --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/parser.py @@ -0,0 +1,589 @@ + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? 
+# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# +# FIRST sets: +# +# stream: { STREAM-START } +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR 
TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } + +__all__ = ['Parser', 'ParserError'] + +from error import MarkedYAMLError +from tokens import * +from events import * +from scanner import * + +class ParserError(MarkedYAMLError): + pass + +class Parser(object): + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + + DEFAULT_TAGS = { + u'!': u'!', + u'!!': u'tag:yaml.org,2002:', + } + + def __init__(self): + self.current_event = None + self.yaml_version = None + self.tag_handles = {} + self.states = [] + self.marks = [] + self.state = self.parse_stream_start + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def check_event(self, *choices): + # Check the type of the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # Get the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + return self.current_event + + def get_event(self): + # Get the next event and proceed further. + if self.current_event is None: + if self.state: + self.current_event = self.state() + value = self.current_event + self.current_event = None + return value + + # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END + # implicit_document ::= block_node DOCUMENT-END* + # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* + + def parse_stream_start(self): + + # Parse the stream start. + token = self.get_token() + event = StreamStartEvent(token.start_mark, token.end_mark, + encoding=token.encoding) + + # Prepare the next state. 
+ self.state = self.parse_implicit_document_start + + return event + + def parse_implicit_document_start(self): + + # Parse an implicit document. + if not self.check_token(DirectiveToken, DocumentStartToken, + StreamEndToken): + self.tag_handles = self.DEFAULT_TAGS + token = self.peek_token() + start_mark = end_mark = token.start_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=False) + + # Prepare the next state. + self.states.append(self.parse_document_end) + self.state = self.parse_block_node + + return event + + else: + return self.parse_document_start() + + def parse_document_start(self): + + # Parse any extra document end indicators. + while self.check_token(DocumentEndToken): + self.get_token() + + # Parse an explicit document. + if not self.check_token(StreamEndToken): + token = self.peek_token() + start_mark = token.start_mark + version, tags = self.process_directives() + if not self.check_token(DocumentStartToken): + raise ParserError(None, None, + "expected '', but found %r" + % self.peek_token().id, + self.peek_token().start_mark) + token = self.get_token() + end_mark = token.end_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=True, version=version, tags=tags) + self.states.append(self.parse_document_end) + self.state = self.parse_document_content + else: + # Parse the end of the stream. + token = self.get_token() + event = StreamEndEvent(token.start_mark, token.end_mark) + assert not self.states + assert not self.marks + self.state = None + return event + + def parse_document_end(self): + + # Parse the document end. + token = self.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.check_token(DocumentEndToken): + token = self.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, + explicit=explicit) + + # Prepare the next state. 
+ self.state = self.parse_document_start + + return event + + def parse_document_content(self): + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + event = self.process_empty_scalar(self.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == u'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == u'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle.encode('utf-8'), + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? 
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + if self.check_token(AliasToken): + token = self.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) + self.state = self.states.pop() + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle.encode('utf-8'), + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == u'!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.peek_token().start_mark + event = None + implicit = (tag is None or tag == u'!') + if indentless_sequence and 
self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + self.state = self.parse_indentless_sequence_entry + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == u'!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + self.state = self.states.pop() + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_sequence_first_entry + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_mapping_first_key + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_sequence_first_entry + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_mapping_first_key + elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. 
+ event = ScalarEvent(anchor, tag, (implicit, False), u'', + start_mark, end_mark) + self.state = self.states.pop() + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.peek_token() + raise ParserError("while parsing a %s node" % node, start_mark, + "expected the node content, but found %r" % token.id, + token.start_mark) + return event + + # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END + + def parse_block_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_sequence_entry() + + def parse_block_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, BlockEndToken): + self.states.append(self.parse_block_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_block_sequence_entry + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block collection", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ + + def parse_indentless_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, + KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_indentless_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_indentless_sequence_entry + return self.process_empty_scalar(token.end_mark) + token = self.peek_token() + event = SequenceEndEvent(token.start_mark, token.start_mark) + self.state = self.states.pop() + return event + + # block_mapping ::= BLOCK-MAPPING_START + # ((KEY 
block_node_or_indentless_sequence?)? + # (VALUE block_node_or_indentless_sequence?)?)* + # BLOCK-END + + def parse_block_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_mapping_key() + + def parse_block_mapping_key(self): + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_value) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block mapping", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_block_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_key) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_block_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + # flow_sequence ::= FLOW-SEQUENCE-START + # (flow_sequence_entry FLOW-ENTRY)* + # flow_sequence_entry? + # FLOW-SEQUENCE-END + # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + # + # Note that while production rules for both flow_sequence_entry and + # flow_mapping_entry are equal, their interpretations are different. + # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` + # generate an inline mapping (set syntax). 
+ + def parse_flow_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + if not self.check_token(FlowSequenceEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow sequence", self.marks[-1], + "expected ',' or ']', but got %r" % token.id, token.start_mark) + + if self.check_token(KeyToken): + token = self.peek_token() + event = MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_sequence_entry_mapping_end(self): + self.state = self.parse_flow_sequence_entry + token = self.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + + def parse_flow_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_mapping_key(first=True) + + def parse_flow_mapping_key(self, first=False): + if not self.check_token(FlowMappingEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow mapping", self.marks[-1], + "expected ',' or '}', but got %r" % token.id, token.start_mark) + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(token.end_mark) + elif not self.check_token(FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_empty_value) + return self.parse_flow_node() + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_key) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_mapping_empty_value(self): + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(self.peek_token().start_mark) + + def process_empty_scalar(self, mark): + return ScalarEvent(None, None, (True, False), u'', mark, mark) + diff --git a/scripts/clang-tidy/11.0.0/yaml/reader.py b/scripts/clang-tidy/11.0.0/yaml/reader.py new file mode 100644 index 000000000..3249e6b9f --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/reader.py @@ -0,0 +1,190 @@ +# This module contains abstractions for the input stream. You don't have to +# looks further, there are no pretty code. +# +# We define two classes here. +# +# Mark(source, line, column) +# It's just a record and its only use is producing nice error messages. +# Parser does not use it for any other purposes. +# +# Reader(source, data) +# Reader determines the encoding of `data` and converts it to unicode. +# Reader provides the following methods and attributes: +# reader.peek(length=1) - return the next `length` characters +# reader.forward(length=1) - move the current position to `length` characters. +# reader.index - the number of the current character. +# reader.line, stream.column - the line and the column of the current character. 
+ +__all__ = ['Reader', 'ReaderError'] + +from error import YAMLError, Mark + +import codecs, re + +class ReaderError(YAMLError): + + def __init__(self, name, position, character, encoding, reason): + self.name = name + self.character = character + self.position = position + self.encoding = encoding + self.reason = reason + + def __str__(self): + if isinstance(self.character, str): + return "'%s' codec can't decode byte #x%02x: %s\n" \ + " in \"%s\", position %d" \ + % (self.encoding, ord(self.character), self.reason, + self.name, self.position) + else: + return "unacceptable character #x%04x: %s\n" \ + " in \"%s\", position %d" \ + % (self.character, self.reason, + self.name, self.position) + +class Reader(object): + # Reader: + # - determines the data encoding and converts it to unicode, + # - checks if characters are in allowed range, + # - adds '\0' to the end. + + # Reader accepts + # - a `str` object, + # - a `unicode` object, + # - a file-like object with its `read` method returning `str`, + # - a file-like object with its `read` method returning `unicode`. + + # Yeah, it's ugly and slow. 
+ + def __init__(self, stream): + self.name = None + self.stream = None + self.stream_pointer = 0 + self.eof = True + self.buffer = u'' + self.pointer = 0 + self.raw_buffer = None + self.raw_decode = None + self.encoding = None + self.index = 0 + self.line = 0 + self.column = 0 + if isinstance(stream, unicode): + self.name = "" + self.check_printable(stream) + self.buffer = stream+u'\0' + elif isinstance(stream, str): + self.name = "" + self.raw_buffer = stream + self.determine_encoding() + else: + self.stream = stream + self.name = getattr(stream, 'name', "") + self.eof = False + self.raw_buffer = '' + self.determine_encoding() + + def peek(self, index=0): + try: + return self.buffer[self.pointer+index] + except IndexError: + self.update(index+1) + return self.buffer[self.pointer+index] + + def prefix(self, length=1): + if self.pointer+length >= len(self.buffer): + self.update(length) + return self.buffer[self.pointer:self.pointer+length] + + def forward(self, length=1): + if self.pointer+length+1 >= len(self.buffer): + self.update(length+1) + while length: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch in u'\n\x85\u2028\u2029' \ + or (ch == u'\r' and self.buffer[self.pointer] != u'\n'): + self.line += 1 + self.column = 0 + elif ch != u'\uFEFF': + self.column += 1 + length -= 1 + + def get_mark(self): + if self.stream is None: + return Mark(self.name, self.index, self.line, self.column, + self.buffer, self.pointer) + else: + return Mark(self.name, self.index, self.line, self.column, + None, None) + + def determine_encoding(self): + while not self.eof and len(self.raw_buffer) < 2: + self.update_raw() + if not isinstance(self.raw_buffer, unicode): + if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): + self.raw_decode = codecs.utf_16_le_decode + self.encoding = 'utf-16-le' + elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): + self.raw_decode = codecs.utf_16_be_decode + self.encoding = 'utf-16-be' + else: + self.raw_decode = 
codecs.utf_8_decode + self.encoding = 'utf-8' + self.update(1) + + NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') + def check_printable(self, data): + match = self.NON_PRINTABLE.search(data) + if match: + character = match.group() + position = self.index+(len(self.buffer)-self.pointer)+match.start() + raise ReaderError(self.name, position, ord(character), + 'unicode', "special characters are not allowed") + + def update(self, length): + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer:] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, + 'strict', self.eof) + except UnicodeDecodeError, exc: + character = exc.object[exc.start] + if self.stream is not None: + position = self.stream_pointer-len(self.raw_buffer)+exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, + exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += u'\0' + self.raw_buffer = None + break + + def update_raw(self, size=1024): + data = self.stream.read(size) + if data: + self.raw_buffer += data + self.stream_pointer += len(data) + else: + self.eof = True + +#try: +# import psyco +# psyco.bind(Reader) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/11.0.0/yaml/representer.py b/scripts/clang-tidy/11.0.0/yaml/representer.py new file mode 100644 index 000000000..4ea8cb1fe --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/representer.py @@ -0,0 +1,486 @@ + +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError'] + +from error import * +from nodes import * + +import datetime + +import sys, copy_reg, types + +class RepresenterError(YAMLError): + pass + +class 
BaseRepresenter(object): + + yaml_representers = {} + yaml_multi_representers = {} + + def __init__(self, default_style=None, default_flow_style=None): + self.default_style = default_style + self.default_flow_style = default_flow_style + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent(self, data): + node = self.represent_data(data) + self.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def get_classobj_bases(self, cls): + bases = [cls] + for base in cls.__bases__: + bases.extend(self.get_classobj_bases(base)) + return bases + + def represent_data(self, data): + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + #if node is None: + # raise RepresenterError("recursive objects are not allowed: %r" % data) + return node + #self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if type(data) is types.InstanceType: + data_types = self.get_classobj_bases(data.__class__)+list(data_types) + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, unicode(data)) + #if alias_key is not None: + # self.represented_objects[alias_key] = node + return node + + def add_representer(cls, data_type, representer): + if not 'yaml_representers' in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + 
cls.yaml_representers[data_type] = representer + add_representer = classmethod(add_representer) + + def add_multi_representer(cls, data_type, representer): + if not 'yaml_multi_representers' in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + add_multi_representer = classmethod(add_multi_representer) + + def represent_scalar(self, tag, value, style=None): + if style is None: + style = self.default_style + node = ScalarNode(tag, value, style=style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + value = [] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + value = [] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = mapping.items() + mapping.sort() + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + 
else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + return False + +class SafeRepresenter(BaseRepresenter): + + def ignore_aliases(self, data): + if data is None: + return True + if isinstance(data, tuple) and data == (): + return True + if isinstance(data, (str, unicode, bool, int, float)): + return True + + def represent_none(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:null', + u'null') + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_bool(self, data): + if data: + value = u'true' + else: + value = u'false' + return self.represent_scalar(u'tag:yaml.org,2002:bool', value) + + def represent_int(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + def represent_long(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value*inf_value): + inf_value *= inf_value + + def represent_float(self, data): + if data != data or (data == 0.0 and data == 1.0): + value = u'.nan' + elif data == self.inf_value: + value = u'.inf' + elif data == -self.inf_value: + value = u'-.inf' + else: + value = unicode(repr(data)).lower() + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag. We fix this by adding + # '.0' before the 'e' symbol. + if u'.' 
not in value and u'e' in value: + value = value.replace(u'e', u'.0e', 1) + return self.represent_scalar(u'tag:yaml.org,2002:float', value) + + def represent_list(self, data): + #pairs = (len(data) > 0 and isinstance(data, list)) + #if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + #if not pairs: + return self.represent_sequence(u'tag:yaml.org,2002:seq', data) + #value = [] + #for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + #return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + return self.represent_mapping(u'tag:yaml.org,2002:map', data) + + def represent_set(self, data): + value = {} + for key in data: + value[key] = None + return self.represent_mapping(u'tag:yaml.org,2002:set', value) + + def represent_date(self, data): + value = unicode(data.isoformat()) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + value = unicode(data.isoformat(' ')) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + raise RepresenterError("cannot represent an object: %s" % data) + +SafeRepresenter.add_representer(type(None), + SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, + SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(unicode, + SafeRepresenter.represent_unicode) + +SafeRepresenter.add_representer(bool, + SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, + SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(long, + SafeRepresenter.represent_long) + 
+SafeRepresenter.add_representer(float, + SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, + SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, + SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, + SafeRepresenter.represent_undefined) + +class Representer(SafeRepresenter): + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:python/str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + tag = None + try: + data.encode('ascii') + tag = u'tag:yaml.org,2002:python/unicode' + except UnicodeEncodeError: + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data) + + def represent_long(self, data): + tag = u'tag:yaml.org,2002:int' + if int(data) is not data: + tag = u'tag:yaml.org,2002:python/long' + return self.represent_scalar(tag, unicode(data)) + + def represent_complex(self, data): + if data.imag == 0.0: + data = u'%r' % data.real + elif data.real == 0.0: + data = u'%rj' % data.imag + elif data.imag > 0: + data = u'%r+%rj' % (data.real, data.imag) + else: + data = u'%r%rj' % (data.real, data.imag) + return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) + + def represent_tuple(self, data): + return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + name = u'%s.%s' % 
(data.__module__, data.__name__) + return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'') + + def represent_module(self, data): + return self.represent_scalar( + u'tag:yaml.org,2002:python/module:'+data.__name__, u'') + + def represent_instance(self, data): + # For instances of classic classes, we use __getinitargs__ and + # __getstate__ to serialize the data. + + # If data.__getinitargs__ exists, the object must be reconstructed by + # calling cls(**args), where args is a tuple returned by + # __getinitargs__. Otherwise, the cls.__init__ method should never be + # called and the class instance is created by instantiating a trivial + # class and assigning to the instance's __class__ variable. + + # If data.__getstate__ exists, it returns the state of the object. + # Otherwise, the state of the object is data.__dict__. + + # We produce either a !!python/object or !!python/object/new node. + # If data.__getinitargs__ does not exist and state is a dictionary, we + # produce a !!python/object node . Otherwise we produce a + # !!python/object/new node. + + cls = data.__class__ + class_name = u'%s.%s' % (cls.__module__, cls.__name__) + args = None + state = None + if hasattr(data, '__getinitargs__'): + args = list(data.__getinitargs__()) + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__ + if args is None and isinstance(state, dict): + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+class_name, state) + if isinstance(state, dict) and not state: + return self.represent_sequence( + u'tag:yaml.org,2002:python/object/new:'+class_name, args) + value = {} + if args: + value['args'] = args + value['state'] = state + return self.represent_mapping( + u'tag:yaml.org,2002:python/object/new:'+class_name, value) + + def represent_object(self, data): + # We use __reduce__ API to save the data. 
data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. + + cls = type(data) + if cls in copy_reg.dispatch_table: + reduce = copy_reg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError("cannot represent object: %r" % data) + reduce = (list(reduce)+[None]*5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = u'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = u'tag:yaml.org,2002:python/object/apply:' + newobj = False + function_name = u'%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems \ + and isinstance(state, dict) and newobj: + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+function_name, state) + if not listitems and not dictitems \ + and isinstance(state, dict) and not state: + return self.represent_sequence(tag+function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag+function_name, value) 
+ +Representer.add_representer(str, + Representer.represent_str) + +Representer.add_representer(unicode, + Representer.represent_unicode) + +Representer.add_representer(long, + Representer.represent_long) + +Representer.add_representer(complex, + Representer.represent_complex) + +Representer.add_representer(tuple, + Representer.represent_tuple) + +Representer.add_representer(type, + Representer.represent_name) + +Representer.add_representer(types.ClassType, + Representer.represent_name) + +Representer.add_representer(types.FunctionType, + Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, + Representer.represent_name) + +Representer.add_representer(types.ModuleType, + Representer.represent_module) + +Representer.add_multi_representer(types.InstanceType, + Representer.represent_instance) + +Representer.add_multi_representer(object, + Representer.represent_object) + diff --git a/scripts/clang-tidy/11.0.0/yaml/resolver.py b/scripts/clang-tidy/11.0.0/yaml/resolver.py new file mode 100644 index 000000000..528fbc0ea --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/resolver.py @@ -0,0 +1,227 @@ + +__all__ = ['BaseResolver', 'Resolver'] + +from error import * +from nodes import * + +import re + +class ResolverError(YAMLError): + pass + +class BaseResolver(object): + + DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} + yaml_path_resolvers = {} + + def __init__(self): + self.resolver_exact_paths = [] + self.resolver_prefix_paths = [] + + def add_implicit_resolver(cls, tag, regexp, first): + if not 'yaml_implicit_resolvers' in cls.__dict__: + implicit_resolvers = {} + for key in cls.yaml_implicit_resolvers: + implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:] + cls.yaml_implicit_resolvers = implicit_resolvers + if first is None: + first = [None] + for ch in first: + 
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + add_implicit_resolver = classmethod(add_implicit_resolver) + + def add_path_resolver(cls, tag, path, kind=None): + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. 
+ if not 'yaml_path_resolvers' in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError("Invalid path element: %s" % element) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ + and not isinstance(node_check, basestring) \ + and node_check is not None: + raise ResolverError("Invalid node checker: %s" % node_check) + if not isinstance(index_check, (basestring, int)) \ + and index_check is not None: + raise ResolverError("Invalid index checker: %s" % index_check) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] \ + and kind is not None: + raise ResolverError("Invalid node kind: %s" % kind) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + add_path_resolver = classmethod(add_path_resolver) + + def descend_resolver(self, current_node, current_index): + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, + current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + 
prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, + current_node, current_index): + node_check, index_check = path[depth-1] + if isinstance(node_check, basestring): + if current_node.tag != node_check: + return + elif node_check is not None: + if not isinstance(current_node, node_check): + return + if index_check is True and current_index is not None: + return + if (index_check is False or index_check is None) \ + and current_index is None: + return + if isinstance(index_check, basestring): + if not (isinstance(current_index, ScalarNode) + and index_check == current_index.value): + return + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return + return True + + def resolve(self, kind, value, implicit): + if kind is ScalarNode and implicit[0]: + if value == u'': + resolvers = self.yaml_implicit_resolvers.get(u'', []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if self.yaml_path_resolvers: + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + +class Resolver(BaseResolver): + pass + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:bool', + re.compile(ur'''^(?:yes|Yes|YES|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + 
list(u'yYnNtTfFoO')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:float', + re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? + |\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:int', + re.compile(ur'''^(?:[-+]?0b[0-1_]+ + |[-+]?0[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), + list(u'-+0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:merge', + re.compile(ur'^(?:<<)$'), + [u'<']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:null', + re.compile(ur'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + [u'~', u'n', u'N', u'']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:timestamp', + re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? + (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list(u'0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:value', + re.compile(ur'^(?:=)$'), + [u'=']) + +# The following resolver is only for documentation purposes. It cannot work +# because plain scalars cannot start with '!', '&', or '*'. 
+Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:yaml', + re.compile(ur'^(?:!|&|\*)$'), + list(u'!&*')) + diff --git a/scripts/clang-tidy/11.0.0/yaml/scanner.py b/scripts/clang-tidy/11.0.0/yaml/scanner.py new file mode 100644 index 000000000..834f662a4 --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/scanner.py @@ -0,0 +1,1453 @@ + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# Read comments in the Scanner code for more details. +# + +__all__ = ['Scanner', 'ScannerError'] + +from error import MarkedYAMLError +from tokens import * + +class ScannerError(MarkedYAMLError): + pass + +class SimpleKey(object): + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + +class Scanner(object): + + def __init__(self): + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer. + + # Had we reached the end of the stream? + self.done = False + + # The number of unclosed '{' and '['. `flow_level == 0` means block + # context. + self.flow_level = 0 + + # List of processed tokens that are not yet emitted. 
+ self.tokens = [] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. + self.possible_simple_keys = {} + + # Public methods. + + def check_token(self, *choices): + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # Return the next token, but do not delete if from the queue. 
+ while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + return self.tokens[0] + + def get_token(self): + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. + + def need_more_tokens(self): + if self.done: + return False + if not self.tokens: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + + def fetch_more_tokens(self): + + # Eat whitespaces and comments until we reach the next token. + self.scan_to_next_token() + + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.column) + + # Peek the next character. + ch = self.peek() + + # Is it the end of stream? + if ch == u'\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == u'%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == u'-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == u'.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + #if ch == u'\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == u'[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == u'{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == u']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? 
+ if ch == u'}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == u',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == u'-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == u'?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == u':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == u'*': + return self.fetch_alias() + + # Is it an anchor? + if ch == u'&': + return self.fetch_anchor() + + # Is it a tag? + if ch == u'!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == u'|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == u'>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == u'\'': + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == u'\"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError("while scanning for the next token", None, + "found character %r that cannot start any token" + % ch.encode('utf-8'), self.get_mark()) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. 
We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). + for level in self.possible_simple_keys.keys(): + key = self.possible_simple_keys[level] + if key.line != self.line \ + or self.index-key.index > 1024: + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # The next token may start a simple key. We check if it's possible + # and save its position. This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.column + + # The next token might be a simple key. Let's save it's number and + # position. + if self.allow_simple_key: + self.remove_possible_simple_key() + token_number = self.tokens_taken+len(self.tokens) + key = SimpleKey(token_number, required, + self.index, self.line, self.column, self.get_mark()) + self.possible_simple_keys[self.flow_level] = key + + def remove_possible_simple_key(self): + # Remove the saved possible key position at the current flow level. 
+ if self.flow_level in self.possible_simple_keys: + key = self.possible_simple_keys[self.flow_level] + + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + + del self.possible_simple_keys[self.flow_level] + + # Indentation functions. + + def unwind_indent(self, column): + + ## In flow context, tokens should respect indentation. + ## Actually the condition should be `self.indent >= column` according to + ## the spec. But this condition will prohibit intuitively correct + ## constructions such as + ## key : { + ## } + #if self.flow_level and self.indent > column: + # raise ScannerError(None, None, + # "invalid intendation or unclosed '[' or '{'", + # self.get_mark()) + + # In the flow context, indentation is ignored. We make the scanner less + # restrictive then specification requires. + if self.flow_level: + return + + # In block context, we may need to issue the BLOCK-END tokens. + while self.indent > column: + mark = self.get_mark() + self.indent = self.indents.pop() + self.tokens.append(BlockEndToken(mark, mark)) + + def add_indent(self, column): + # Check if we need to increase indentation. + if self.indent < column: + self.indents.append(self.indent) + self.indent = column + return True + return False + + # Fetchers. + + def fetch_stream_start(self): + # We always add STREAM-START as the first token and STREAM-END as the + # last token. + + # Read the token. + mark = self.get_mark() + + # Add STREAM-START. + self.tokens.append(StreamStartToken(mark, mark, + encoding=self.encoding)) + + + def fetch_stream_end(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + self.possible_simple_keys = {} + + # Read the token. + mark = self.get_mark() + + # Add STREAM-END. + self.tokens.append(StreamEndToken(mark, mark)) + + # The steam is finished. 
+ self.done = True + + def fetch_directive(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Scan and add DIRECTIVE. + self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END. + start_mark = self.get_mark() + self.forward(3) + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_start(self): + self.fetch_flow_collection_start(FlowSequenceStartToken) + + def fetch_flow_mapping_start(self): + self.fetch_flow_collection_start(FlowMappingStartToken) + + def fetch_flow_collection_start(self, TokenClass): + + # '[' and '{' may start a simple key. + self.save_possible_simple_key() + + # Increase the flow level. + self.flow_level += 1 + + # Simple keys are allowed after '[' and '{'. + self.allow_simple_key = True + + # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_end(self): + self.fetch_flow_collection_end(FlowSequenceEndToken) + + def fetch_flow_mapping_end(self): + self.fetch_flow_collection_end(FlowMappingEndToken) + + def fetch_flow_collection_end(self, TokenClass): + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Decrease the flow level. + self.flow_level -= 1 + + # No simple keys after ']' or '}'. 
+ self.allow_simple_key = False + + # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_entry(self): + + # Simple keys are allowed after ','. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add FLOW-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError(None, None, + "sequence entries are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-SEQUENCE-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + + # Simple keys are allowed after '-'. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not nessesary a simple)? + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping keys are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-MAPPING-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. 
+ self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert(key.token_number-self.tokens_taken, + KeyToken(key.mark, key.mark)) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert(key.token_number-self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark)) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be catched by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping values are not allowed here", + self.get_mark()) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. 
+ start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + + # ALIAS could be a simple key. + self.save_possible_simple_key() + + # No simple keys after ALIAS. + self.allow_simple_key = False + + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + + # ANCHOR could start a simple key. + self.save_possible_simple_key() + + # No simple keys after ANCHOR. + self.allow_simple_key = False + + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + + # TAG could start a simple key. + self.save_possible_simple_key() + + # No simple keys after TAG. + self.allow_simple_key = False + + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + + # A simple key may follow a block scalar. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + self.fetch_flow_scalar(style='\'') + + def fetch_double(self): + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + + # A flow scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after flow scalars. + self.allow_simple_key = False + + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + + # A plain scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + + # Scan and add SCALAR. 
May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.column == 0: + return True + + def check_document_start(self): + + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'---' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_document_end(self): + + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'...' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_block_entry(self): + + # BLOCK-ENTRY: '-' (' '|'\n') + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_key(self): + + # KEY(flow context): '?' + if self.flow_level: + return True + + # KEY(block context): '?' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_value(self): + + # VALUE(flow context): ':' + if self.flow_level: + return True + + # VALUE(block context): ':' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_plain(self): + + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. + # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + ch = self.peek() + return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ + or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029' + and (ch == u'-' or (not self.flow_level and ch in u'?:'))) + + # Scanners. + + def scan_to_next_token(self): + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. 
+ # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + + if self.index == 0 and self.peek() == u'\uFEFF': + self.forward() + found = False + while not found: + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + + def scan_directive(self): + # See the specification for details. + start_mark = self.get_mark() + self.forward() + name = self.scan_directive_name(start_mark) + value = None + if name == u'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.get_mark() + elif name == u'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.get_mark() + else: + end_mark = self.get_mark() + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # See the specification for details. 
+ length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return value + + def scan_yaml_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + major = self.scan_yaml_directive_number(start_mark) + if self.peek() != '.': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or '.', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + minor = self.scan_yaml_directive_number(start_mark) + if self.peek() not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or ' ', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + return (major, minor) + + def scan_yaml_directive_number(self, start_mark): + # See the specification for details. + ch = self.peek() + if not (u'0' <= ch <= u'9'): + raise ScannerError("while scanning a directive", start_mark, + "expected a digit, but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 0 + while u'0' <= self.peek(length) <= u'9': + length += 1 + value = int(self.prefix(length)) + self.forward(length) + return value + + def scan_tag_directive_value(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + handle = self.scan_tag_directive_handle(start_mark) + while self.peek() == u' ': + self.forward() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.peek() + if ch != u' ': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_tag_directive_prefix(self, start_mark): + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_directive_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpteted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. 
+ start_mark = self.get_mark() + indicator = self.peek() + if indicator == u'*': + name = 'alias' + else: + name = 'anchor' + self.forward() + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`': + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + end_mark = self.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # See the specification for details. + start_mark = self.get_mark() + ch = self.peek(1) + if ch == u'<': + handle = None + self.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if self.peek() != u'>': + raise ScannerError("while parsing a tag", start_mark, + "expected '>', but found %r" % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + elif ch in u'\0 \t\r\n\x85\u2028\u2029': + handle = None + suffix = u'!' + self.forward() + else: + length = 1 + use_handle = False + while ch not in u'\0 \r\n\x85\u2028\u2029': + if ch == u'!': + use_handle = True + break + length += 1 + ch = self.peek(length) + handle = u'!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = u'!' 
+ self.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a tag", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + value = (handle, suffix) + end_mark = self.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style): + # See the specification for details. + + if style == '>': + folded = True + else: + folded = False + + chunks = [] + start_mark = self.get_mark() + + # Scan the header. + self.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. + min_indent = self.indent+1 + if min_indent < 1: + min_indent = 1 + if increment is None: + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + indent = min_indent+increment-1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = u'' + + # Scan the inner part of the block scalar. + while self.column == indent and self.peek() != u'\0': + chunks.extend(breaks) + leading_non_space = self.peek() not in u' \t' + length = 0 + while self.peek(length) not in u'\0\r\n\x85\u2028\u2029': + length += 1 + chunks.append(self.prefix(length)) + self.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if self.column == indent and self.peek() != u'\0': + + # Unfortunately, folding rules are ambiguous. 
+ # + # This is the folding according to the specification: + + if folded and line_break == u'\n' \ + and leading_non_space and self.peek() not in u' \t': + if not breaks: + chunks.append(u' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + #if folded and line_break == u'\n': + # if not breaks: + # if self.peek() not in ' \t': + # chunks.append(u' ') + # else: + # chunks.append(line_break) + #else: + # chunks.append(line_break) + else: + break + + # Chomp the tail. + if chomping is not False: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + + # We are done. + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + def scan_block_scalar_indicators(self, start_mark): + # See the specification for details. + chomping = None + increment = None + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + elif ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected chomping or indentation indicators, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_block_scalar_indentation(self): + # See the specification for details. + chunks = [] + max_indent = 0 + end_mark = self.get_mark() + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() != u' ': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + else: + self.forward() + if self.column > max_indent: + max_indent = self.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # See the specification for details. + chunks = [] + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + while self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + return chunks, end_mark + + def scan_flow_scalar(self, style): + # See the specification for details. + # Note that we loose indentation rules for quoted scalars. Quoted + # scalars don't need to adhere indentation because " and ' clearly + # mark the beginning and the end of them. Therefore we are less + # restrictive then the specification requires. We only need to check + # that document separators are not included in scalars. 
+ if style == '"': + double = True + else: + double = False + chunks = [] + start_mark = self.get_mark() + quote = self.peek() + self.forward() + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + while self.peek() != quote: + chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + self.forward() + end_mark = self.get_mark() + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + ESCAPE_REPLACEMENTS = { + u'0': u'\0', + u'a': u'\x07', + u'b': u'\x08', + u't': u'\x09', + u'\t': u'\x09', + u'n': u'\x0A', + u'v': u'\x0B', + u'f': u'\x0C', + u'r': u'\x0D', + u'e': u'\x1B', + u' ': u'\x20', + u'\"': u'\"', + u'\\': u'\\', + u'N': u'\x85', + u'_': u'\xA0', + u'L': u'\u2028', + u'P': u'\u2029', + } + + ESCAPE_CODES = { + u'x': 2, + u'u': 4, + u'U': 8, + } + + def scan_flow_scalar_non_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + length = 0 + while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029': + length += 1 + if length: + chunks.append(self.prefix(length)) + self.forward(length) + ch = self.peek() + if not double and ch == u'\'' and self.peek(1) == u'\'': + chunks.append(u'\'') + self.forward(2) + elif (double and ch == u'\'') or (not double and ch in u'\"\\'): + chunks.append(ch) + self.forward() + elif double and ch == u'\\': + self.forward() + ch = self.peek() + if ch in self.ESCAPE_REPLACEMENTS: + chunks.append(self.ESCAPE_REPLACEMENTS[ch]) + self.forward() + elif ch in self.ESCAPE_CODES: + length = self.ESCAPE_CODES[ch] + self.forward() + for k in range(length): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "expected escape sequence of %d hexdecimal numbers, but found %r" % + (length, self.peek(k).encode('utf-8')), self.get_mark()) + code = int(self.prefix(length), 16) + 
chunks.append(unichr(code)) + self.forward(length) + elif ch in u'\r\n\x85\u2028\u2029': + self.scan_line_break() + chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) + else: + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark()) + else: + return chunks + + def scan_flow_scalar_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + length = 0 + while self.peek(length) in u' \t': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch == u'\0': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected end of stream", self.get_mark()) + elif ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + breaks = self.scan_flow_scalar_breaks(double, start_mark) + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + else: + chunks.append(whitespaces) + return chunks + + def scan_flow_scalar_breaks(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + # Instead of checking indentation, we check for document + # separators. + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected document separator", self.get_mark()) + while self.peek() in u' \t': + self.forward() + if self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + else: + return chunks + + def scan_plain(self): + # See the specification for details. + # We add an additional restriction for the flow context: + # plain scalars in the flow context cannot contain ',', ':' and '?'. + # We also keep track of the `allow_simple_key` flag here. + # Indentation rules are loosed for the flow context. 
+ chunks = [] + start_mark = self.get_mark() + end_mark = start_mark + indent = self.indent+1 + # We allow zero indentation for scalars, but then we need to check for + # document separators at the beginning of the line. + #if indent == 0: + # indent = 1 + spaces = [] + while True: + length = 0 + if self.peek() == u'#': + break + while True: + ch = self.peek(length) + if ch in u'\0 \t\r\n\x85\u2028\u2029' \ + or (not self.flow_level and ch == u':' and + self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \ + or (self.flow_level and ch in u',:?[]{}'): + break + length += 1 + # It's not clear what we should do with ':' in the flow context. + if (self.flow_level and ch == u':' + and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'): + self.forward(length) + raise ScannerError("while scanning a plain scalar", start_mark, + "found unexpected ':'", self.get_mark(), + "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.") + if length == 0: + break + self.allow_simple_key = False + chunks.extend(spaces) + chunks.append(self.prefix(length)) + self.forward(length) + end_mark = self.get_mark() + spaces = self.scan_plain_spaces(indent, start_mark) + if not spaces or self.peek() == u'#' \ + or (not self.flow_level and self.column < indent): + break + return ScalarToken(u''.join(chunks), True, start_mark, end_mark) + + def scan_plain_spaces(self, indent, start_mark): + # See the specification for details. + # The specification is really confusing about tabs in plain scalars. + # We just forbid them completely. Do not use tabs in YAML! 
+ chunks = [] + length = 0 + while self.peek(length) in u' ': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + self.allow_simple_key = True + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + breaks = [] + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() == ' ': + self.forward() + else: + breaks.append(self.scan_line_break()) + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + elif whitespaces: + chunks.append(whitespaces) + return chunks + + def scan_tag_handle(self, name, start_mark): + # See the specification for details. + # For some strange reasons, the specification does not allow '_' in + # tag handles. I have allowed it anyway. + ch = self.peek() + if ch != u'!': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 1 + ch = self.peek(length) + if ch != u' ': + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if ch != u'!': + self.forward(length) + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length += 1 + value = self.prefix(length) + self.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # See the specification for details. + # Note: we do not check if URI is well-formed. 
+ chunks = [] + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.!~*\'()[]%': + if ch == u'%': + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = self.peek(length) + if length: + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + if not chunks: + raise ScannerError("while parsing a %s" % name, start_mark, + "expected URI, but found %r" % ch.encode('utf-8'), + self.get_mark()) + return u''.join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # See the specification for details. + bytes = [] + mark = self.get_mark() + while self.peek() == u'%': + self.forward() + for k in range(2): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected URI escape sequence of 2 hexdecimal numbers, but found %r" % + (self.peek(k).encode('utf-8')), self.get_mark()) + bytes.append(chr(int(self.prefix(2), 16))) + self.forward(2) + try: + value = unicode(''.join(bytes), 'utf-8') + except UnicodeDecodeError, exc: + raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) + return value + + def scan_line_break(self): + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.peek() + if ch in u'\r\n\x85': + if self.prefix(2) == u'\r\n': + self.forward(2) + else: + self.forward() + return u'\n' + elif ch in u'\u2028\u2029': + self.forward() + return ch + return u'' + +#try: +# import psyco +# psyco.bind(Scanner) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/11.0.0/yaml/serializer.py b/scripts/clang-tidy/11.0.0/yaml/serializer.py new file mode 100644 index 000000000..0bf1e96dc --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/serializer.py @@ -0,0 +1,111 @@ + 
+__all__ = ['Serializer', 'SerializerError'] + +from error import YAMLError +from events import * +from nodes import * + +class SerializerError(YAMLError): + pass + +class Serializer(object): + + ANCHOR_TEMPLATE = u'id%03d' + + def __init__(self, encoding=None, + explicit_start=None, explicit_end=None, version=None, tags=None): + self.use_encoding = encoding + self.use_explicit_start = explicit_start + self.use_explicit_end = explicit_end + self.use_version = version + self.use_tags = tags + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + self.closed = None + + def open(self): + if self.closed is None: + self.emit(StreamStartEvent(encoding=self.use_encoding)) + self.closed = False + elif self.closed: + raise SerializerError("serializer is closed") + else: + raise SerializerError("serializer is already opened") + + def close(self): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif not self.closed: + self.emit(StreamEndEvent()) + self.closed = True + + #def __del__(self): + # self.close() + + def serialize(self, node): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif self.closed: + raise SerializerError("serializer is closed") + self.emit(DocumentStartEvent(explicit=self.use_explicit_start, + version=self.use_version, tags=self.use_tags)) + self.anchor_node(node) + self.serialize_node(node, None, None) + self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + + def anchor_node(self, node): + if node in self.anchors: + if self.anchors[node] is None: + self.anchors[node] = self.generate_anchor(node) + else: + self.anchors[node] = None + if isinstance(node, SequenceNode): + for item in node.value: + self.anchor_node(item) + elif isinstance(node, MappingNode): + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) + + def generate_anchor(self, node): + 
self.last_anchor_id += 1 + return self.ANCHOR_TEMPLATE % self.last_anchor_id + + def serialize_node(self, node, parent, index): + alias = self.anchors[node] + if node in self.serialized_nodes: + self.emit(AliasEvent(alias)) + else: + self.serialized_nodes[node] = True + self.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + detected_tag = self.resolve(ScalarNode, node.value, (True, False)) + default_tag = self.resolve(ScalarNode, node.value, (False, True)) + implicit = (node.tag == detected_tag), (node.tag == default_tag) + self.emit(ScalarEvent(alias, node.tag, implicit, node.value, + style=node.style)) + elif isinstance(node, SequenceNode): + implicit = (node.tag + == self.resolve(SequenceNode, node.value, True)) + self.emit(SequenceStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emit(SequenceEndEvent()) + elif isinstance(node, MappingNode): + implicit = (node.tag + == self.resolve(MappingNode, node.value, True)) + self.emit(MappingStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + self.emit(MappingEndEvent()) + self.ascend_resolver() + diff --git a/scripts/clang-tidy/11.0.0/yaml/tokens.py b/scripts/clang-tidy/11.0.0/yaml/tokens.py new file mode 100644 index 000000000..4d0b48a39 --- /dev/null +++ b/scripts/clang-tidy/11.0.0/yaml/tokens.py @@ -0,0 +1,104 @@ + +class Token(object): + def __init__(self, start_mark, end_mark): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in self.__dict__ + if not key.endswith('_mark')] + attributes.sort() + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +#class BOMToken(Token): +# id = '' + +class 
DirectiveToken(Token): + id = '' + def __init__(self, name, value, start_mark, end_mark): + self.name = name + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class DocumentStartToken(Token): + id = '' + +class DocumentEndToken(Token): + id = '' + +class StreamStartToken(Token): + id = '' + def __init__(self, start_mark=None, end_mark=None, + encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndToken(Token): + id = '' + +class BlockSequenceStartToken(Token): + id = '' + +class BlockMappingStartToken(Token): + id = '' + +class BlockEndToken(Token): + id = '' + +class FlowSequenceStartToken(Token): + id = '[' + +class FlowMappingStartToken(Token): + id = '{' + +class FlowSequenceEndToken(Token): + id = ']' + +class FlowMappingEndToken(Token): + id = '}' + +class KeyToken(Token): + id = '?' + +class ValueToken(Token): + id = ':' + +class BlockEntryToken(Token): + id = '-' + +class FlowEntryToken(Token): + id = ',' + +class AliasToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class AnchorToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class TagToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class ScalarToken(Token): + id = '' + def __init__(self, value, plain, start_mark, end_mark, style=None): + self.value = value + self.plain = plain + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + diff --git a/scripts/clang-tidy/6.0.0/README-yaml.md b/scripts/clang-tidy/6.0.0/README-yaml.md new file mode 100644 index 000000000..2cc738ab7 --- /dev/null +++ b/scripts/clang-tidy/6.0.0/README-yaml.md @@ -0,0 +1,13 @@ +This is a copy of 
`pyyaml-3.12` vendored on April 24, 2018 by @springmeyer. + +https://github.com/mapbox/mason/issues/563 documents why. + +The process to vendor was: + +``` +cd mason +pip install pyyaml --user +cp -R $(python -m site --user-site)/yaml scripts/clang-tidy/6.0.0/
+ """ + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + +def compose_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + loader = Loader(stream) + try: + while loader.check_node(): + yield loader.get_node() + finally: + loader.dispose() + +def load(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + loader = Loader(stream) + try: + return loader.get_single_data() + finally: + loader.dispose() + +def load_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + """ + loader = Loader(stream) + try: + while loader.check_data(): + yield loader.get_data() + finally: + loader.dispose() + +def safe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + return load(stream, SafeLoader) + +def safe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + return load_all(stream, SafeLoader) + +def emit(events, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. 
+ """ + getvalue = None + if stream is None: + from StringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + try: + for event in events: + dumper.emit(event) + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize_all(nodes, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for node in nodes: + dumper.serialize(node) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + +def dump_all(documents, stream=None, Dumper=Dumper, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for data in documents: + dumper.represent(data) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def dump(data, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a Python object into a YAML stream. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=Dumper, **kwds) + +def safe_dump_all(documents, stream=None, **kwds): + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + +def safe_dump(data, stream=None, **kwds): + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + +def add_implicit_resolver(tag, regexp, first=None, + Loader=Loader, Dumper=Dumper): + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + Loader.add_implicit_resolver(tag, regexp, first) + Dumper.add_implicit_resolver(tag, regexp, first) + +def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): + """ + Add a path based resolver for the given tag. 
+ A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. + """ + Loader.add_path_resolver(tag, path, kind) + Dumper.add_path_resolver(tag, path, kind) + +def add_constructor(tag, constructor, Loader=Loader): + """ + Add a constructor for the given tag. + Constructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. + """ + Loader.add_constructor(tag, constructor) + +def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. + Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. + """ + Loader.add_multi_constructor(tag_prefix, multi_constructor) + +def add_representer(data_type, representer, Dumper=Dumper): + """ + Add a representer for the given type. + Representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. + """ + Dumper.add_representer(data_type, representer) + +def add_multi_representer(data_type, multi_representer, Dumper=Dumper): + """ + Add a representer for the given type. + Multi-representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. + """ + Dumper.add_multi_representer(data_type, multi_representer) + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. 
+ """ + def __init__(cls, name, bases, kwds): + super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + cls.yaml_dumper.add_representer(cls, cls.to_yaml) + +class YAMLObject(object): + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. + """ + + __metaclass__ = YAMLObjectMetaclass + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_loader = Loader + yaml_dumper = Dumper + + yaml_tag = None + yaml_flow_style = None + + def from_yaml(cls, loader, node): + """ + Convert a representation node to a Python object. + """ + return loader.construct_yaml_object(node, cls) + from_yaml = classmethod(from_yaml) + + def to_yaml(cls, dumper, data): + """ + Convert a Python object to a representation node. + """ + return dumper.represent_yaml_object(cls.yaml_tag, data, cls, + flow_style=cls.yaml_flow_style) + to_yaml = classmethod(to_yaml) + diff --git a/scripts/clang-tidy/6.0.0/yaml/composer.py b/scripts/clang-tidy/6.0.0/yaml/composer.py new file mode 100644 index 000000000..06e5ac782 --- /dev/null +++ b/scripts/clang-tidy/6.0.0/yaml/composer.py @@ -0,0 +1,139 @@ + +__all__ = ['Composer', 'ComposerError'] + +from error import MarkedYAMLError +from events import * +from nodes import * + +class ComposerError(MarkedYAMLError): + pass + +class Composer(object): + + def __init__(self): + self.anchors = {} + + def check_node(self): + # Drop the STREAM-START event. + if self.check_event(StreamStartEvent): + self.get_event() + + # If there are more documents available? + return not self.check_event(StreamEndEvent) + + def get_node(self): + # Get the root node of the next document. + if not self.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # Drop the STREAM-START event. + self.get_event() + + # Compose a document if the stream is not empty. 
+ document = None + if not self.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.check_event(StreamEndEvent): + event = self.get_event() + raise ComposerError("expected a single document in the stream", + document.start_mark, "but found another document", + event.start_mark) + + # Drop the STREAM-END event. + self.get_event() + + return document + + def compose_document(self): + # Drop the DOCUMENT-START event. + self.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. + self.get_event() + + self.anchors = {} + return node + + def compose_node(self, parent, index): + if self.check_event(AliasEvent): + event = self.get_event() + anchor = event.anchor + if anchor not in self.anchors: + raise ComposerError(None, None, "found undefined alias %r" + % anchor.encode('utf-8'), event.start_mark) + return self.anchors[anchor] + event = self.peek_event() + anchor = event.anchor + if anchor is not None: + if anchor in self.anchors: + raise ComposerError("found duplicate anchor %r; first occurence" + % anchor.encode('utf-8'), self.anchors[anchor].start_mark, + "second occurence", event.start_mark) + self.descend_resolver(parent, index) + if self.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + event = self.get_event() + tag = event.tag + if tag is None or tag == u'!': + tag = self.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode(tag, event.value, + event.start_mark, event.end_mark, style=event.style) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + start_event = 
self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + + def compose_mapping_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(MappingNode, None, start_event.implicit) + node = MappingNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + while not self.check_event(MappingEndEvent): + #key_event = self.peek_event() + item_key = self.compose_node(node, None) + #if item_key in node.value: + # raise ComposerError("while composing a mapping", start_event.start_mark, + # "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + #node.value[item_key] = item_value + node.value.append((item_key, item_value)) + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + diff --git a/scripts/clang-tidy/6.0.0/yaml/constructor.py b/scripts/clang-tidy/6.0.0/yaml/constructor.py new file mode 100644 index 000000000..635faac3e --- /dev/null +++ b/scripts/clang-tidy/6.0.0/yaml/constructor.py @@ -0,0 +1,675 @@ + +__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', + 'ConstructorError'] + +from error import * +from nodes import * + +import datetime + +import binascii, re, sys, types + +class ConstructorError(MarkedYAMLError): + pass + +class BaseConstructor(object): + + yaml_constructors = {} + yaml_multi_constructors = {} + + def __init__(self): + self.constructed_objects = {} + self.recursive_objects = {} + 
self.state_generators = [] + self.deep_construct = False + + def check_data(self): + # If there are more documents available? + return self.check_node() + + def get_data(self): + # Construct and return the next document. + if self.check_node(): + return self.construct_document(self.get_node()) + + def get_single_data(self): + # Ensure that the stream contains a single document and construct it. + node = self.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + data = self.construct_object(node) + while self.state_generators: + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + raise ConstructorError(None, None, + "found unconstructable recursive node", node.start_mark) + self.recursive_objects[node] = None + constructor = None + tag_suffix = None + if node.tag in self.yaml_constructors: + constructor = self.yaml_constructors[node.tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if node.tag.startswith(tag_prefix): + tag_suffix = node.tag[len(tag_prefix):] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = node.tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + 
constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = generator.next() + if self.deep_construct: + for dummy in generator: + pass + else: + self.state_generators.append(generator) + self.constructed_objects[node] = data + del self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_scalar(self, node): + if not isinstance(node, ScalarNode): + raise ConstructorError(None, None, + "expected a scalar node, but found %s" % node.id, + node.start_mark) + return node.value + + def construct_sequence(self, node, deep=False): + if not isinstance(node, SequenceNode): + raise ConstructorError(None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + return [self.construct_object(child, deep=deep) + for child in node.value] + + def construct_mapping(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except TypeError, exc: + raise ConstructorError("while constructing a mapping", node.start_mark, + "found unacceptable key (%s)" % exc, key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + def construct_pairs(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + def add_constructor(cls, tag, 
constructor): + if not 'yaml_constructors' in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + add_constructor = classmethod(add_constructor) + + def add_multi_constructor(cls, tag_prefix, multi_constructor): + if not 'yaml_multi_constructors' in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + cls.yaml_multi_constructors[tag_prefix] = multi_constructor + add_multi_constructor = classmethod(add_multi_constructor) + +class SafeConstructor(BaseConstructor): + + def construct_scalar(self, node): + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + if 
isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + + def construct_yaml_null(self, node): + self.construct_scalar(node) + return None + + bool_values = { + u'yes': True, + u'no': False, + u'true': True, + u'false': False, + u'on': True, + u'off': False, + } + + def construct_yaml_bool(self, node): + value = self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '') + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '0': + return 0 + elif value.startswith('0b'): + return sign*int(value[2:], 2) + elif value.startswith('0x'): + return sign*int(value[2:], 16) + elif value[0] == '0': + return sign*int(value, 8) + elif ':' in value: + digits = [int(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*int(value) + + inf_value = 1e300 + while inf_value != inf_value*inf_value: + inf_value *= inf_value + nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). 
+ + def construct_yaml_float(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '').lower() + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '.inf': + return sign*self.inf_value + elif value == '.nan': + return self.nan_value + elif ':' in value: + digits = [float(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*float(value) + + def construct_yaml_binary(self, node): + value = self.construct_scalar(node) + try: + return str(value).decode('base64') + except (binascii.Error, UnicodeEncodeError), exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + timestamp_regexp = re.compile( + ur'''^(?P[0-9][0-9][0-9][0-9]) + -(?P[0-9][0-9]?) + -(?P[0-9][0-9]?) + (?:(?:[Tt]|[ \t]+) + (?P[0-9][0-9]?) + :(?P[0-9][0-9]) + :(?P[0-9][0-9]) + (?:\.(?P[0-9]*))? + (?:[ \t]*(?PZ|(?P[-+])(?P[0-9][0-9]?) 
+ (?::(?P[0-9][0-9]))?))?)?$''', re.X) + + def construct_yaml_timestamp(self, node): + value = self.construct_scalar(node) + match = self.timestamp_regexp.match(node.value) + values = match.groupdict() + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + if not values['hour']: + return datetime.date(year, month, day) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction = values['fraction'][:6] + while len(fraction) < 6: + fraction += '0' + fraction = int(fraction) + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + tz_minute = int(values['tz_minute'] or 0) + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data + + def construct_yaml_omap(self, node): + # Note: we do not check for duplicate keys, because it's too + # CPU-expensive. + omap = [] + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + omap.append((key, value)) + + def construct_yaml_pairs(self, node): + # Note: the same code as `construct_yaml_omap`. 
+ pairs = [] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + data = set() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + value = self.construct_scalar(node) + try: + return value.encode('ascii') + except UnicodeEncodeError: + return value + + def construct_yaml_seq(self, node): + data = [] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + data = {} + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + raise ConstructorError(None, None, + "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'), + node.start_mark) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:null', + SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:bool', + SafeConstructor.construct_yaml_bool) + 
+SafeConstructor.add_constructor( + u'tag:yaml.org,2002:int', + SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:float', + SafeConstructor.construct_yaml_float) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:binary', + SafeConstructor.construct_yaml_binary) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:omap', + SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', + SafeConstructor.construct_yaml_pairs) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:set', + SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:str', + SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:seq', + SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:map', + SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, + SafeConstructor.construct_undefined) + +class Constructor(SafeConstructor): + + def construct_python_str(self, node): + return self.construct_scalar(node).encode('utf-8') + + def construct_python_unicode(self, node): + return self.construct_scalar(node) + + def construct_python_long(self, node): + return long(self.construct_yaml_int(node)) + + def construct_python_complex(self, node): + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + return tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python module", mark, + "expected non-empty name appended to the tag", mark) + try: + __import__(name) + except ImportError, exc: + raise ConstructorError("while constructing a Python module", mark, + "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark) + return 
sys.modules[name] + + def find_python_name(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python object", mark, + "expected non-empty name appended to the tag", mark) + if u'.' in name: + module_name, object_name = name.rsplit('.', 1) + else: + module_name = '__builtin__' + object_name = name + try: + __import__(module_name) + except ImportError, exc: + raise ConstructorError("while constructing a Python object", mark, + "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark) + module = sys.modules[module_name] + if not hasattr(module, object_name): + raise ConstructorError("while constructing a Python object", mark, + "cannot find %r in the module %r" % (object_name.encode('utf-8'), + module.__name__), mark) + return getattr(module, object_name) + + def construct_python_name(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python name", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python module", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_module(suffix, node.start_mark) + + class classobj: pass + + def make_python_instance(self, suffix, node, + args=None, kwds=None, newobj=False): + if not args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if newobj and isinstance(cls, type(self.classobj)) \ + and not args and not kwds: + instance = self.classobj() + instance.__class__ = cls + return instance + elif newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, 
instance, state): + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(object, key, value) + + def construct_python_object(self, suffix, node): + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. 
+ if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} + state = {} + listitems = [] + dictitems = {} + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if state: + self.set_python_instance_state(instance, state) + if listitems: + instance.extend(listitems) + if dictitems: + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + return self.construct_python_object_apply(suffix, node, newobj=True) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/none', + Constructor.construct_yaml_null) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/bool', + Constructor.construct_yaml_bool) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/str', + Constructor.construct_python_str) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/unicode', + Constructor.construct_python_unicode) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/int', + Constructor.construct_yaml_int) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/long', + Constructor.construct_python_long) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/float', + Constructor.construct_yaml_float) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/complex', + Constructor.construct_python_complex) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/list', + Constructor.construct_yaml_seq) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/tuple', + Constructor.construct_python_tuple) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/dict', + Constructor.construct_yaml_map) + +Constructor.add_multi_constructor( + 
u'tag:yaml.org,2002:python/name:', + Constructor.construct_python_name) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/module:', + Constructor.construct_python_module) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object:', + Constructor.construct_python_object) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/apply:', + Constructor.construct_python_object_apply) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/new:', + Constructor.construct_python_object_new) + diff --git a/scripts/clang-tidy/6.0.0/yaml/cyaml.py b/scripts/clang-tidy/6.0.0/yaml/cyaml.py new file mode 100644 index 000000000..68dcd7519 --- /dev/null +++ b/scripts/clang-tidy/6.0.0/yaml/cyaml.py @@ -0,0 +1,85 @@ + +__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', + 'CBaseDumper', 'CSafeDumper', 'CDumper'] + +from _yaml import CParser, CEmitter + +from constructor import * + +from serializer import * +from representer import * + +from resolver import * + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class CSafeLoader(CParser, SafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class CLoader(CParser, Constructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + Constructor.__init__(self) + Resolver.__init__(self) + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + 
allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CDumper(CEmitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/6.0.0/yaml/dumper.py b/scripts/clang-tidy/6.0.0/yaml/dumper.py new file mode 100644 index 000000000..f811d2c91 --- /dev/null +++ b/scripts/clang-tidy/6.0.0/yaml/dumper.py @@ -0,0 +1,62 @@ + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] + +from emitter import * +from serializer import * +from representer import * +from resolver 
import * + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class Dumper(Emitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + 
version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/6.0.0/yaml/emitter.py b/scripts/clang-tidy/6.0.0/yaml/emitter.py new file mode 100644 index 000000000..e5bcdcccb --- /dev/null +++ b/scripts/clang-tidy/6.0.0/yaml/emitter.py @@ -0,0 +1,1140 @@ + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +__all__ = ['Emitter', 'EmitterError'] + +from error import YAMLError +from events import * + +class EmitterError(YAMLError): + pass + +class ScalarAnalysis(object): + def __init__(self, scalar, empty, multiline, + allow_flow_plain, allow_block_plain, + allow_single_quoted, allow_double_quoted, + allow_block): + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + +class Emitter(object): + + DEFAULT_TAG_PREFIXES = { + u'!' : u'!', + u'tag:yaml.org,2002:' : u'!!', + } + + def __init__(self, stream, canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + + # The stream should have the methods `write` and possibly `flush`. + self.stream = stream + + # Encoding can be overriden by STREAM-START. + self.encoding = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] + self.state = self.expect_stream_start + + # Current event and the event queue. + self.events = [] + self.event = None + + # The current indentation level and the stack of previous indents. 
+ self.indents = [] + self.indent = None + + # Flow level. + self.flow_level = 0 + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + + # Whether the document requires an explicit document indicator + self.open_ended = False + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + self.best_indent = 2 + if indent and 1 < indent < 10: + self.best_indent = indent + self.best_width = 80 + if width and width > self.best_indent*2: + self.best_width = width + self.best_line_break = u'\n' + if line_break in [u'\r', u'\n', u'\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None + + # Prepared anchor and tag. + self.prepared_anchor = None + self.prepared_tag = None + + # Scalar analysis and style. + self.analysis = None + self.style = None + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. 
+ + def need_more_events(self): + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return (len(self.events) < count+1) + + def increase_indent(self, flow=False, indentless=False): + self.indents.append(self.indent) + if self.indent is None: + if flow: + self.indent = self.best_indent + else: + self.indent = 0 + elif not indentless: + self.indent += self.best_indent + + # States. + + # Stream handlers. + + def expect_stream_start(self): + if isinstance(self.event, StreamStartEvent): + if self.event.encoding and not getattr(self.stream, 'encoding', None): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError("expected StreamStartEvent, but got %s" + % self.event) + + def expect_nothing(self): + raise EmitterError("expected nothing, but got %s" % self.event) + + # Document handlers. 
+ + def expect_first_document_start(self): + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = self.event.tags.keys() + handles.sort() + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = (first and not self.event.explicit and not self.canonical + and not self.event.version and not self.event.tags + and not self.check_empty_document()) + if not implicit: + self.write_indent() + self.write_indicator(u'---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError("expected DocumentStartEvent, but got %s" + % self.event) + + def expect_document_end(self): + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator(u'...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError("expected DocumentEndEvent, but got %s" + % self.event) + + def expect_document_root(self): + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. 
+ + def expect_node(self, root=False, sequence=False, mapping=False, + simple_key=False): + self.root_context = root + self.sequence_context = sequence + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + self.process_anchor(u'&') + self.process_tag() + if isinstance(self.event, ScalarEvent): + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_sequence(): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_mapping(): + self.expect_flow_mapping() + else: + self.expect_block_mapping() + else: + raise EmitterError("expected NodeEvent, but got %s" % self.event) + + def expect_alias(self): + if self.event.anchor is None: + raise EmitterError("anchor is not specified for alias") + self.process_anchor(u'*') + self.state = self.states.pop() + + def expect_scalar(self): + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
+ + def expect_flow_sequence(self): + self.write_indicator(u'[', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. 
+ + def expect_flow_mapping(self): + self.write_indicator(u'{', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(u':', True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. 
+ + def expect_block_sequence(self): + indentless = (self.mapping_context and not self.indention) + self.increase_indent(flow=False, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + if not first and isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + self.write_indicator(u'-', True, indention=True) + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. + + def expect_block_mapping(self): + self.increase_indent(flow=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + if not first and isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + if self.check_simple_key(): + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + self.write_indent() + self.write_indicator(u':', True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. 
+ + def check_empty_sequence(self): + return (isinstance(self.event, SequenceStartEvent) and self.events + and isinstance(self.events[0], SequenceEndEvent)) + + def check_empty_mapping(self): + return (isinstance(self.event, MappingStartEvent) and self.events + and isinstance(self.events[0], MappingEndEvent)) + + def check_empty_document(self): + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return (isinstance(event, ScalarEvent) and event.anchor is None + and event.tag is None and event.implicit and event.value == u'') + + def check_simple_key(self): + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ + and self.event.tag is not None: + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return (length < 128 and (isinstance(self.event, AliasEvent) + or (isinstance(self.event, ScalarEvent) + and not self.analysis.empty and not self.analysis.multiline) + or self.check_empty_sequence() or self.check_empty_mapping())) + + # Anchor, Tag, and Scalar processors. 
+ + def process_anchor(self, indicator): + if self.event.anchor is None: + self.prepared_anchor = None + return + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator+self.prepared_anchor, True) + self.prepared_anchor = None + + def process_tag(self): + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ((not self.canonical or tag is None) and + ((self.style == '' and self.event.implicit[0]) + or (self.style != '' and self.event.implicit[1]))): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = u'!' + self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError("tag is not specified") + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + self.prepared_tag = None + + def choose_scalar_style(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if not self.event.style and self.event.implicit[0]: + if (not (self.simple_key_context and + (self.analysis.empty or self.analysis.multiline)) + and (self.flow_level and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain))): + return '' + if self.event.style and self.event.style in '|>': + if (not self.flow_level and not self.simple_key_context + and self.analysis.allow_block): + return self.event.style + if not self.event.style or self.event.style == '\'': + if (self.analysis.allow_single_quoted and + not (self.simple_key_context and self.analysis.multiline)): + return '\'' + return '"' + + def process_scalar(self): + if self.analysis is None: + self.analysis 
= self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = (not self.simple_key_context) + #if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == '\'': + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + + # Analyzers. + + def prepare_version(self, version): + major, minor = version + if major != 1: + raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) + return u'%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + if not handle: + raise EmitterError("tag handle must not be empty") + if handle[0] != u'!' 
or handle[-1] != u'!': + raise EmitterError("tag handle must start and end with '!': %r" + % (handle.encode('utf-8'))) + for ch in handle[1:-1]: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the tag handle: %r" + % (ch.encode('utf-8'), handle.encode('utf-8'))) + return handle + + def prepare_tag_prefix(self, prefix): + if not prefix: + raise EmitterError("tag prefix must not be empty") + chunks = [] + start = end = 0 + if prefix[0] == u'!': + end = 1 + while end < len(prefix): + ch = prefix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?!:@&=+$,_.~*\'()[]': + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(prefix[start:end]) + return u''.join(chunks) + + def prepare_tag(self, tag): + if not tag: + raise EmitterError("tag must not be empty") + if tag == u'!': + return tag + handle = None + suffix = tag + prefixes = self.tag_prefixes.keys() + prefixes.sort() + for prefix in prefixes: + if tag.startswith(prefix) \ + and (prefix == u'!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix):] + chunks = [] + start = end = 0 + while end < len(suffix): + ch = suffix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.~*\'()[]' \ + or (ch == u'!' 
and handle != u'!'): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = u''.join(chunks) + if handle: + return u'%s%s' % (handle, suffix_text) + else: + return u'!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + if not anchor: + raise EmitterError("anchor must not be empty") + for ch in anchor: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the anchor: %r" + % (ch.encode('utf-8'), anchor.encode('utf-8'))) + return anchor + + def analyze_scalar(self, scalar): + + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, + allow_flow_plain=False, allow_block_plain=True, + allow_single_quoted=True, allow_double_quoted=True, + allow_block=False) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith(u'---') or scalar.startswith(u'...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceeded_by_whitespace = True + + # Last character or followed by a whitespace. + followed_by_whitespace = (len(scalar) == 1 or + scalar[1] in u'\0 \t\r\n\x85\u2028\u2029') + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. 
+ if ch in u'#,[]{}&*!|>\'\"%@`': + flow_indicators = True + block_indicators = True + if ch in u'?:': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in u',?[]{}': + flow_indicators = True + if ch == u':': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'#' and preceeded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in u'\n\x85\u2028\u2029': + line_breaks = True + if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): + if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': + unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. + if ch == u' ': + if index == 0: + leading_space = True + if index == len(scalar)-1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in u'\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar)-1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029') + followed_by_whitespace = (index+1 >= len(scalar) or + scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029') + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. 
+ if (leading_space or leading_break + or trailing_space or trailing_break): + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if space_break or special_characters: + allow_flow_plain = allow_block_plain = \ + allow_single_quoted = allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis(scalar=scalar, + empty=False, multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block) + + # Writers. + + def flush_stream(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # Write BOM if needed. 
+ if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write(u'\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, + whitespace=False, indention=False): + if self.whitespace or not need_whitespace: + data = indicator + else: + data = u' '+indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + indent = self.indent or 0 + if not self.indention or self.column > indent \ + or (self.column == indent and not self.whitespace): + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = u' '*(indent-self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + data = u'%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + data = u'%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. 
+ + def write_single_quoted(self, text, split=True): + self.write_indicator(u'\'', True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != u' ': + if start+1 == end and self.column > self.best_width and split \ + and start != 0 and end != len(text): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'': + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == u'\'': + data = u'\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + self.write_indicator(u'\'', False) + + ESCAPE_REPLACEMENTS = { + u'\0': u'0', + u'\x07': u'a', + u'\x08': u'b', + u'\x09': u't', + u'\x0A': u'n', + u'\x0B': u'v', + u'\x0C': u'f', + u'\x0D': u'r', + u'\x1B': u'e', + u'\"': u'\"', + u'\\': u'\\', + u'\x85': u'N', + u'\xA0': u'_', + u'\u2028': u'L', + u'\u2029': u'P', + } + + def write_double_quoted(self, text, split=True): + self.write_indicator(u'"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \ + or not (u'\x20' <= ch <= u'\x7E' + or (self.allow_unicode + and (u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD'))): + if 
start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = u'\\'+self.ESCAPE_REPLACEMENTS[ch] + elif ch <= u'\xFF': + data = u'\\x%02X' % ord(ch) + elif ch <= u'\uFFFF': + data = u'\\u%04X' % ord(ch) + else: + data = u'\\U%08X' % ord(ch) + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end+1 + if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \ + and self.column+(end-start) > self.best_width and split: + data = text[start:end]+u'\\' + if start < end: + start = end + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == u' ': + data = u'\\' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator(u'"', False) + + def determine_block_hints(self, text): + hints = u'' + if text: + if text[0] in u' \n\x85\u2028\u2029': + hints += unicode(self.best_indent) + if text[-1] not in u'\n\x85\u2028\u2029': + hints += u'-' + elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029': + hints += u'+' + return hints + + def write_folded(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'>'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if not leading_space and ch is not None and ch != u' ' \ + and text[start] == u'\n': + self.write_line_break() + leading_space = (ch == u' ') + for br in text[start:end]: + if br == u'\n': + 
self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + spaces = (ch == u' ') + end += 1 + + def write_literal(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'|'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + else: + if ch is None or ch in u'\n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + + def write_plain(self, text, split=True): + if self.root_context: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = u' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = 
text[end] + if spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + diff --git a/scripts/clang-tidy/6.0.0/yaml/error.py b/scripts/clang-tidy/6.0.0/yaml/error.py new file mode 100644 index 000000000..577686db5 --- /dev/null +++ b/scripts/clang-tidy/6.0.0/yaml/error.py @@ -0,0 +1,75 @@ + +__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] + +class Mark(object): + + def __init__(self, name, index, line, column, buffer, pointer): + self.name = name + self.index = index + self.line = line + self.column = column + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + if self.buffer is None: + return None + head = '' + start = self.pointer + while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer-start > max_length/2-1: + head = ' ... ' + start += 5 + break + tail = '' + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': + end += 1 + if end-self.pointer > max_length/2-1: + tail = ' ... 
' + end -= 5 + break + snippet = self.buffer[start:end].encode('utf-8') + return ' '*indent + head + snippet + tail + '\n' \ + + ' '*(indent+self.pointer-start+len(head)) + '^' + + def __str__(self): + snippet = self.get_snippet() + where = " in \"%s\", line %d, column %d" \ + % (self.name, self.line+1, self.column+1) + if snippet is not None: + where += ":\n"+snippet + return where + +class YAMLError(Exception): + pass + +class MarkedYAMLError(YAMLError): + + def __init__(self, context=None, context_mark=None, + problem=None, problem_mark=None, note=None): + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + + def __str__(self): + lines = [] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None \ + and (self.problem is None or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None: + lines.append(self.note) + return '\n'.join(lines) + diff --git a/scripts/clang-tidy/6.0.0/yaml/events.py b/scripts/clang-tidy/6.0.0/yaml/events.py new file mode 100644 index 000000000..f79ad389c --- /dev/null +++ b/scripts/clang-tidy/6.0.0/yaml/events.py @@ -0,0 +1,86 @@ + +# Abstract classes. 
+ +class Event(object): + def __init__(self, start_mark=None, end_mark=None): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] + if hasattr(self, key)] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +class NodeEvent(Event): + def __init__(self, anchor, start_mark=None, end_mark=None): + self.anchor = anchor + self.start_mark = start_mark + self.end_mark = end_mark + +class CollectionStartEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, + flow_style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class CollectionEndEvent(Event): + pass + +# Implementations. + +class StreamStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndEvent(Event): + pass + +class DocumentStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None, version=None, tags=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + self.version = version + self.tags = tags + +class DocumentEndEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + +class AliasEvent(NodeEvent): + pass + +class ScalarEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, value, + start_mark=None, end_mark=None, style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class SequenceStartEvent(CollectionStartEvent): + pass 
+ +class SequenceEndEvent(CollectionEndEvent): + pass + +class MappingStartEvent(CollectionStartEvent): + pass + +class MappingEndEvent(CollectionEndEvent): + pass + diff --git a/scripts/clang-tidy/6.0.0/yaml/loader.py b/scripts/clang-tidy/6.0.0/yaml/loader.py new file mode 100644 index 000000000..293ff467b --- /dev/null +++ b/scripts/clang-tidy/6.0.0/yaml/loader.py @@ -0,0 +1,40 @@ + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] + +from reader import * +from scanner import * +from parser import * +from composer import * +from constructor import * +from resolver import * + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/6.0.0/yaml/nodes.py b/scripts/clang-tidy/6.0.0/yaml/nodes.py new file mode 100644 index 000000000..c4f070c41 --- /dev/null +++ b/scripts/clang-tidy/6.0.0/yaml/nodes.py @@ -0,0 +1,49 @@ + +class Node(object): + def __init__(self, tag, value, start_mark, end_mark): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + value = self.value + #if isinstance(value, list): + # if len(value) == 0: + # value = '' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = 
'<%d items>' % len(value) + #else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... ') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + +class ScalarNode(Node): + id = 'scalar' + def __init__(self, tag, value, + start_mark=None, end_mark=None, style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class CollectionNode(Node): + def __init__(self, tag, value, + start_mark=None, end_mark=None, flow_style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class SequenceNode(CollectionNode): + id = 'sequence' + +class MappingNode(CollectionNode): + id = 'mapping' + diff --git a/scripts/clang-tidy/6.0.0/yaml/parser.py b/scripts/clang-tidy/6.0.0/yaml/parser.py new file mode 100644 index 000000000..f9e3057f3 --- /dev/null +++ b/scripts/clang-tidy/6.0.0/yaml/parser.py @@ -0,0 +1,589 @@ + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? 
+# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# +# FIRST sets: +# +# stream: { STREAM-START } +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR 
TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } + +__all__ = ['Parser', 'ParserError'] + +from error import MarkedYAMLError +from tokens import * +from events import * +from scanner import * + +class ParserError(MarkedYAMLError): + pass + +class Parser(object): + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + + DEFAULT_TAGS = { + u'!': u'!', + u'!!': u'tag:yaml.org,2002:', + } + + def __init__(self): + self.current_event = None + self.yaml_version = None + self.tag_handles = {} + self.states = [] + self.marks = [] + self.state = self.parse_stream_start + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def check_event(self, *choices): + # Check the type of the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # Get the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + return self.current_event + + def get_event(self): + # Get the next event and proceed further. + if self.current_event is None: + if self.state: + self.current_event = self.state() + value = self.current_event + self.current_event = None + return value + + # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END + # implicit_document ::= block_node DOCUMENT-END* + # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* + + def parse_stream_start(self): + + # Parse the stream start. + token = self.get_token() + event = StreamStartEvent(token.start_mark, token.end_mark, + encoding=token.encoding) + + # Prepare the next state. 
+ self.state = self.parse_implicit_document_start + + return event + + def parse_implicit_document_start(self): + + # Parse an implicit document. + if not self.check_token(DirectiveToken, DocumentStartToken, + StreamEndToken): + self.tag_handles = self.DEFAULT_TAGS + token = self.peek_token() + start_mark = end_mark = token.start_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=False) + + # Prepare the next state. + self.states.append(self.parse_document_end) + self.state = self.parse_block_node + + return event + + else: + return self.parse_document_start() + + def parse_document_start(self): + + # Parse any extra document end indicators. + while self.check_token(DocumentEndToken): + self.get_token() + + # Parse an explicit document. + if not self.check_token(StreamEndToken): + token = self.peek_token() + start_mark = token.start_mark + version, tags = self.process_directives() + if not self.check_token(DocumentStartToken): + raise ParserError(None, None, + "expected '', but found %r" + % self.peek_token().id, + self.peek_token().start_mark) + token = self.get_token() + end_mark = token.end_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=True, version=version, tags=tags) + self.states.append(self.parse_document_end) + self.state = self.parse_document_content + else: + # Parse the end of the stream. + token = self.get_token() + event = StreamEndEvent(token.start_mark, token.end_mark) + assert not self.states + assert not self.marks + self.state = None + return event + + def parse_document_end(self): + + # Parse the document end. + token = self.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.check_token(DocumentEndToken): + token = self.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, + explicit=explicit) + + # Prepare the next state. 
+ self.state = self.parse_document_start + + return event + + def parse_document_content(self): + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + event = self.process_empty_scalar(self.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == u'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == u'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle.encode('utf-8'), + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? 
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + if self.check_token(AliasToken): + token = self.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) + self.state = self.states.pop() + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle.encode('utf-8'), + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == u'!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.peek_token().start_mark + event = None + implicit = (tag is None or tag == u'!') + if indentless_sequence and 
self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + self.state = self.parse_indentless_sequence_entry + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == u'!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + self.state = self.states.pop() + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_sequence_first_entry + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_mapping_first_key + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_sequence_first_entry + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_mapping_first_key + elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. 
+ event = ScalarEvent(anchor, tag, (implicit, False), u'', + start_mark, end_mark) + self.state = self.states.pop() + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.peek_token() + raise ParserError("while parsing a %s node" % node, start_mark, + "expected the node content, but found %r" % token.id, + token.start_mark) + return event + + # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END + + def parse_block_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_sequence_entry() + + def parse_block_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, BlockEndToken): + self.states.append(self.parse_block_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_block_sequence_entry + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block collection", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ + + def parse_indentless_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, + KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_indentless_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_indentless_sequence_entry + return self.process_empty_scalar(token.end_mark) + token = self.peek_token() + event = SequenceEndEvent(token.start_mark, token.start_mark) + self.state = self.states.pop() + return event + + # block_mapping ::= BLOCK-MAPPING_START + # ((KEY 
block_node_or_indentless_sequence?)? + # (VALUE block_node_or_indentless_sequence?)?)* + # BLOCK-END + + def parse_block_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_mapping_key() + + def parse_block_mapping_key(self): + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_value) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block mapping", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_block_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_key) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_block_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + # flow_sequence ::= FLOW-SEQUENCE-START + # (flow_sequence_entry FLOW-ENTRY)* + # flow_sequence_entry? + # FLOW-SEQUENCE-END + # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + # + # Note that while production rules for both flow_sequence_entry and + # flow_mapping_entry are equal, their interpretations are different. + # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` + # generate an inline mapping (set syntax). 
+ + def parse_flow_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + if not self.check_token(FlowSequenceEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow sequence", self.marks[-1], + "expected ',' or ']', but got %r" % token.id, token.start_mark) + + if self.check_token(KeyToken): + token = self.peek_token() + event = MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_sequence_entry_mapping_end(self): + self.state = self.parse_flow_sequence_entry + token = self.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + + def parse_flow_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_mapping_key(first=True) + + def parse_flow_mapping_key(self, first=False): + if not self.check_token(FlowMappingEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow mapping", self.marks[-1], + "expected ',' or '}', but got %r" % token.id, token.start_mark) + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(token.end_mark) + elif not self.check_token(FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_empty_value) + return self.parse_flow_node() + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_key) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_mapping_empty_value(self): + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(self.peek_token().start_mark) + + def process_empty_scalar(self, mark): + return ScalarEvent(None, None, (True, False), u'', mark, mark) + diff --git a/scripts/clang-tidy/6.0.0/yaml/reader.py b/scripts/clang-tidy/6.0.0/yaml/reader.py new file mode 100644 index 000000000..3249e6b9f --- /dev/null +++ b/scripts/clang-tidy/6.0.0/yaml/reader.py @@ -0,0 +1,190 @@ +# This module contains abstractions for the input stream. You don't have to +# looks further, there are no pretty code. +# +# We define two classes here. +# +# Mark(source, line, column) +# It's just a record and its only use is producing nice error messages. +# Parser does not use it for any other purposes. +# +# Reader(source, data) +# Reader determines the encoding of `data` and converts it to unicode. +# Reader provides the following methods and attributes: +# reader.peek(length=1) - return the next `length` characters +# reader.forward(length=1) - move the current position to `length` characters. +# reader.index - the number of the current character. +# reader.line, stream.column - the line and the column of the current character. 
+ +__all__ = ['Reader', 'ReaderError'] + +from error import YAMLError, Mark + +import codecs, re + +class ReaderError(YAMLError): + + def __init__(self, name, position, character, encoding, reason): + self.name = name + self.character = character + self.position = position + self.encoding = encoding + self.reason = reason + + def __str__(self): + if isinstance(self.character, str): + return "'%s' codec can't decode byte #x%02x: %s\n" \ + " in \"%s\", position %d" \ + % (self.encoding, ord(self.character), self.reason, + self.name, self.position) + else: + return "unacceptable character #x%04x: %s\n" \ + " in \"%s\", position %d" \ + % (self.character, self.reason, + self.name, self.position) + +class Reader(object): + # Reader: + # - determines the data encoding and converts it to unicode, + # - checks if characters are in allowed range, + # - adds '\0' to the end. + + # Reader accepts + # - a `str` object, + # - a `unicode` object, + # - a file-like object with its `read` method returning `str`, + # - a file-like object with its `read` method returning `unicode`. + + # Yeah, it's ugly and slow. 
+ + def __init__(self, stream): + self.name = None + self.stream = None + self.stream_pointer = 0 + self.eof = True + self.buffer = u'' + self.pointer = 0 + self.raw_buffer = None + self.raw_decode = None + self.encoding = None + self.index = 0 + self.line = 0 + self.column = 0 + if isinstance(stream, unicode): + self.name = "" + self.check_printable(stream) + self.buffer = stream+u'\0' + elif isinstance(stream, str): + self.name = "" + self.raw_buffer = stream + self.determine_encoding() + else: + self.stream = stream + self.name = getattr(stream, 'name', "") + self.eof = False + self.raw_buffer = '' + self.determine_encoding() + + def peek(self, index=0): + try: + return self.buffer[self.pointer+index] + except IndexError: + self.update(index+1) + return self.buffer[self.pointer+index] + + def prefix(self, length=1): + if self.pointer+length >= len(self.buffer): + self.update(length) + return self.buffer[self.pointer:self.pointer+length] + + def forward(self, length=1): + if self.pointer+length+1 >= len(self.buffer): + self.update(length+1) + while length: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch in u'\n\x85\u2028\u2029' \ + or (ch == u'\r' and self.buffer[self.pointer] != u'\n'): + self.line += 1 + self.column = 0 + elif ch != u'\uFEFF': + self.column += 1 + length -= 1 + + def get_mark(self): + if self.stream is None: + return Mark(self.name, self.index, self.line, self.column, + self.buffer, self.pointer) + else: + return Mark(self.name, self.index, self.line, self.column, + None, None) + + def determine_encoding(self): + while not self.eof and len(self.raw_buffer) < 2: + self.update_raw() + if not isinstance(self.raw_buffer, unicode): + if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): + self.raw_decode = codecs.utf_16_le_decode + self.encoding = 'utf-16-le' + elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): + self.raw_decode = codecs.utf_16_be_decode + self.encoding = 'utf-16-be' + else: + self.raw_decode = 
codecs.utf_8_decode + self.encoding = 'utf-8' + self.update(1) + + NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') + def check_printable(self, data): + match = self.NON_PRINTABLE.search(data) + if match: + character = match.group() + position = self.index+(len(self.buffer)-self.pointer)+match.start() + raise ReaderError(self.name, position, ord(character), + 'unicode', "special characters are not allowed") + + def update(self, length): + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer:] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, + 'strict', self.eof) + except UnicodeDecodeError, exc: + character = exc.object[exc.start] + if self.stream is not None: + position = self.stream_pointer-len(self.raw_buffer)+exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, + exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += u'\0' + self.raw_buffer = None + break + + def update_raw(self, size=1024): + data = self.stream.read(size) + if data: + self.raw_buffer += data + self.stream_pointer += len(data) + else: + self.eof = True + +#try: +# import psyco +# psyco.bind(Reader) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/6.0.0/yaml/representer.py b/scripts/clang-tidy/6.0.0/yaml/representer.py new file mode 100644 index 000000000..4ea8cb1fe --- /dev/null +++ b/scripts/clang-tidy/6.0.0/yaml/representer.py @@ -0,0 +1,486 @@ + +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError'] + +from error import * +from nodes import * + +import datetime + +import sys, copy_reg, types + +class RepresenterError(YAMLError): + pass + +class 
BaseRepresenter(object): + + yaml_representers = {} + yaml_multi_representers = {} + + def __init__(self, default_style=None, default_flow_style=None): + self.default_style = default_style + self.default_flow_style = default_flow_style + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent(self, data): + node = self.represent_data(data) + self.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def get_classobj_bases(self, cls): + bases = [cls] + for base in cls.__bases__: + bases.extend(self.get_classobj_bases(base)) + return bases + + def represent_data(self, data): + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + #if node is None: + # raise RepresenterError("recursive objects are not allowed: %r" % data) + return node + #self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if type(data) is types.InstanceType: + data_types = self.get_classobj_bases(data.__class__)+list(data_types) + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, unicode(data)) + #if alias_key is not None: + # self.represented_objects[alias_key] = node + return node + + def add_representer(cls, data_type, representer): + if not 'yaml_representers' in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + 
cls.yaml_representers[data_type] = representer + add_representer = classmethod(add_representer) + + def add_multi_representer(cls, data_type, representer): + if not 'yaml_multi_representers' in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + add_multi_representer = classmethod(add_multi_representer) + + def represent_scalar(self, tag, value, style=None): + if style is None: + style = self.default_style + node = ScalarNode(tag, value, style=style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + value = [] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + value = [] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = mapping.items() + mapping.sort() + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + 
else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + return False + +class SafeRepresenter(BaseRepresenter): + + def ignore_aliases(self, data): + if data is None: + return True + if isinstance(data, tuple) and data == (): + return True + if isinstance(data, (str, unicode, bool, int, float)): + return True + + def represent_none(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:null', + u'null') + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_bool(self, data): + if data: + value = u'true' + else: + value = u'false' + return self.represent_scalar(u'tag:yaml.org,2002:bool', value) + + def represent_int(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + def represent_long(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value*inf_value): + inf_value *= inf_value + + def represent_float(self, data): + if data != data or (data == 0.0 and data == 1.0): + value = u'.nan' + elif data == self.inf_value: + value = u'.inf' + elif data == -self.inf_value: + value = u'-.inf' + else: + value = unicode(repr(data)).lower() + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag. We fix this by adding + # '.0' before the 'e' symbol. + if u'.' 
not in value and u'e' in value: + value = value.replace(u'e', u'.0e', 1) + return self.represent_scalar(u'tag:yaml.org,2002:float', value) + + def represent_list(self, data): + #pairs = (len(data) > 0 and isinstance(data, list)) + #if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + #if not pairs: + return self.represent_sequence(u'tag:yaml.org,2002:seq', data) + #value = [] + #for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + #return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + return self.represent_mapping(u'tag:yaml.org,2002:map', data) + + def represent_set(self, data): + value = {} + for key in data: + value[key] = None + return self.represent_mapping(u'tag:yaml.org,2002:set', value) + + def represent_date(self, data): + value = unicode(data.isoformat()) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + value = unicode(data.isoformat(' ')) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + raise RepresenterError("cannot represent an object: %s" % data) + +SafeRepresenter.add_representer(type(None), + SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, + SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(unicode, + SafeRepresenter.represent_unicode) + +SafeRepresenter.add_representer(bool, + SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, + SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(long, + SafeRepresenter.represent_long) + 
+SafeRepresenter.add_representer(float, + SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, + SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, + SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, + SafeRepresenter.represent_undefined) + +class Representer(SafeRepresenter): + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:python/str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + tag = None + try: + data.encode('ascii') + tag = u'tag:yaml.org,2002:python/unicode' + except UnicodeEncodeError: + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data) + + def represent_long(self, data): + tag = u'tag:yaml.org,2002:int' + if int(data) is not data: + tag = u'tag:yaml.org,2002:python/long' + return self.represent_scalar(tag, unicode(data)) + + def represent_complex(self, data): + if data.imag == 0.0: + data = u'%r' % data.real + elif data.real == 0.0: + data = u'%rj' % data.imag + elif data.imag > 0: + data = u'%r+%rj' % (data.real, data.imag) + else: + data = u'%r%rj' % (data.real, data.imag) + return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) + + def represent_tuple(self, data): + return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + name = u'%s.%s' % 
(data.__module__, data.__name__) + return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'') + + def represent_module(self, data): + return self.represent_scalar( + u'tag:yaml.org,2002:python/module:'+data.__name__, u'') + + def represent_instance(self, data): + # For instances of classic classes, we use __getinitargs__ and + # __getstate__ to serialize the data. + + # If data.__getinitargs__ exists, the object must be reconstructed by + # calling cls(**args), where args is a tuple returned by + # __getinitargs__. Otherwise, the cls.__init__ method should never be + # called and the class instance is created by instantiating a trivial + # class and assigning to the instance's __class__ variable. + + # If data.__getstate__ exists, it returns the state of the object. + # Otherwise, the state of the object is data.__dict__. + + # We produce either a !!python/object or !!python/object/new node. + # If data.__getinitargs__ does not exist and state is a dictionary, we + # produce a !!python/object node . Otherwise we produce a + # !!python/object/new node. + + cls = data.__class__ + class_name = u'%s.%s' % (cls.__module__, cls.__name__) + args = None + state = None + if hasattr(data, '__getinitargs__'): + args = list(data.__getinitargs__()) + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__ + if args is None and isinstance(state, dict): + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+class_name, state) + if isinstance(state, dict) and not state: + return self.represent_sequence( + u'tag:yaml.org,2002:python/object/new:'+class_name, args) + value = {} + if args: + value['args'] = args + value['state'] = state + return self.represent_mapping( + u'tag:yaml.org,2002:python/object/new:'+class_name, value) + + def represent_object(self, data): + # We use __reduce__ API to save the data. 
data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. + + cls = type(data) + if cls in copy_reg.dispatch_table: + reduce = copy_reg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError("cannot represent object: %r" % data) + reduce = (list(reduce)+[None]*5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = u'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = u'tag:yaml.org,2002:python/object/apply:' + newobj = False + function_name = u'%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems \ + and isinstance(state, dict) and newobj: + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+function_name, state) + if not listitems and not dictitems \ + and isinstance(state, dict) and not state: + return self.represent_sequence(tag+function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag+function_name, value) 
+ +Representer.add_representer(str, + Representer.represent_str) + +Representer.add_representer(unicode, + Representer.represent_unicode) + +Representer.add_representer(long, + Representer.represent_long) + +Representer.add_representer(complex, + Representer.represent_complex) + +Representer.add_representer(tuple, + Representer.represent_tuple) + +Representer.add_representer(type, + Representer.represent_name) + +Representer.add_representer(types.ClassType, + Representer.represent_name) + +Representer.add_representer(types.FunctionType, + Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, + Representer.represent_name) + +Representer.add_representer(types.ModuleType, + Representer.represent_module) + +Representer.add_multi_representer(types.InstanceType, + Representer.represent_instance) + +Representer.add_multi_representer(object, + Representer.represent_object) + diff --git a/scripts/clang-tidy/6.0.0/yaml/resolver.py b/scripts/clang-tidy/6.0.0/yaml/resolver.py new file mode 100644 index 000000000..528fbc0ea --- /dev/null +++ b/scripts/clang-tidy/6.0.0/yaml/resolver.py @@ -0,0 +1,227 @@ + +__all__ = ['BaseResolver', 'Resolver'] + +from error import * +from nodes import * + +import re + +class ResolverError(YAMLError): + pass + +class BaseResolver(object): + + DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} + yaml_path_resolvers = {} + + def __init__(self): + self.resolver_exact_paths = [] + self.resolver_prefix_paths = [] + + def add_implicit_resolver(cls, tag, regexp, first): + if not 'yaml_implicit_resolvers' in cls.__dict__: + implicit_resolvers = {} + for key in cls.yaml_implicit_resolvers: + implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:] + cls.yaml_implicit_resolvers = implicit_resolvers + if first is None: + first = [None] + for ch in first: + 
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + add_implicit_resolver = classmethod(add_implicit_resolver) + + def add_path_resolver(cls, tag, path, kind=None): + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. 
+ if not 'yaml_path_resolvers' in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError("Invalid path element: %s" % element) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ + and not isinstance(node_check, basestring) \ + and node_check is not None: + raise ResolverError("Invalid node checker: %s" % node_check) + if not isinstance(index_check, (basestring, int)) \ + and index_check is not None: + raise ResolverError("Invalid index checker: %s" % index_check) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] \ + and kind is not None: + raise ResolverError("Invalid node kind: %s" % kind) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + add_path_resolver = classmethod(add_path_resolver) + + def descend_resolver(self, current_node, current_index): + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, + current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + 
prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, + current_node, current_index): + node_check, index_check = path[depth-1] + if isinstance(node_check, basestring): + if current_node.tag != node_check: + return + elif node_check is not None: + if not isinstance(current_node, node_check): + return + if index_check is True and current_index is not None: + return + if (index_check is False or index_check is None) \ + and current_index is None: + return + if isinstance(index_check, basestring): + if not (isinstance(current_index, ScalarNode) + and index_check == current_index.value): + return + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return + return True + + def resolve(self, kind, value, implicit): + if kind is ScalarNode and implicit[0]: + if value == u'': + resolvers = self.yaml_implicit_resolvers.get(u'', []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if self.yaml_path_resolvers: + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + +class Resolver(BaseResolver): + pass + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:bool', + re.compile(ur'''^(?:yes|Yes|YES|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + 
list(u'yYnNtTfFoO')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:float', + re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? + |\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:int', + re.compile(ur'''^(?:[-+]?0b[0-1_]+ + |[-+]?0[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), + list(u'-+0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:merge', + re.compile(ur'^(?:<<)$'), + [u'<']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:null', + re.compile(ur'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + [u'~', u'n', u'N', u'']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:timestamp', + re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? + (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list(u'0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:value', + re.compile(ur'^(?:=)$'), + [u'=']) + +# The following resolver is only for documentation purposes. It cannot work +# because plain scalars cannot start with '!', '&', or '*'. 
+Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:yaml', + re.compile(ur'^(?:!|&|\*)$'), + list(u'!&*')) + diff --git a/scripts/clang-tidy/6.0.0/yaml/scanner.py b/scripts/clang-tidy/6.0.0/yaml/scanner.py new file mode 100644 index 000000000..834f662a4 --- /dev/null +++ b/scripts/clang-tidy/6.0.0/yaml/scanner.py @@ -0,0 +1,1453 @@ + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# Read comments in the Scanner code for more details. +# + +__all__ = ['Scanner', 'ScannerError'] + +from error import MarkedYAMLError +from tokens import * + +class ScannerError(MarkedYAMLError): + pass + +class SimpleKey(object): + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + +class Scanner(object): + + def __init__(self): + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer. + + # Had we reached the end of the stream? + self.done = False + + # The number of unclosed '{' and '['. `flow_level == 0` means block + # context. + self.flow_level = 0 + + # List of processed tokens that are not yet emitted. 
+ self.tokens = [] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. + self.possible_simple_keys = {} + + # Public methods. + + def check_token(self, *choices): + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # Return the next token, but do not delete if from the queue. 
+ while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + return self.tokens[0] + + def get_token(self): + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. + + def need_more_tokens(self): + if self.done: + return False + if not self.tokens: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + + def fetch_more_tokens(self): + + # Eat whitespaces and comments until we reach the next token. + self.scan_to_next_token() + + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.column) + + # Peek the next character. + ch = self.peek() + + # Is it the end of stream? + if ch == u'\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == u'%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == u'-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == u'.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + #if ch == u'\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == u'[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == u'{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == u']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? 
+ if ch == u'}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == u',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == u'-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == u'?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == u':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == u'*': + return self.fetch_alias() + + # Is it an anchor? + if ch == u'&': + return self.fetch_anchor() + + # Is it a tag? + if ch == u'!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == u'|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == u'>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == u'\'': + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == u'\"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError("while scanning for the next token", None, + "found character %r that cannot start any token" + % ch.encode('utf-8'), self.get_mark()) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. 
We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). + for level in self.possible_simple_keys.keys(): + key = self.possible_simple_keys[level] + if key.line != self.line \ + or self.index-key.index > 1024: + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # The next token may start a simple key. We check if it's possible + # and save its position. This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.column + + # The next token might be a simple key. Let's save it's number and + # position. + if self.allow_simple_key: + self.remove_possible_simple_key() + token_number = self.tokens_taken+len(self.tokens) + key = SimpleKey(token_number, required, + self.index, self.line, self.column, self.get_mark()) + self.possible_simple_keys[self.flow_level] = key + + def remove_possible_simple_key(self): + # Remove the saved possible key position at the current flow level. 
+ if self.flow_level in self.possible_simple_keys: + key = self.possible_simple_keys[self.flow_level] + + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + + del self.possible_simple_keys[self.flow_level] + + # Indentation functions. + + def unwind_indent(self, column): + + ## In flow context, tokens should respect indentation. + ## Actually the condition should be `self.indent >= column` according to + ## the spec. But this condition will prohibit intuitively correct + ## constructions such as + ## key : { + ## } + #if self.flow_level and self.indent > column: + # raise ScannerError(None, None, + # "invalid intendation or unclosed '[' or '{'", + # self.get_mark()) + + # In the flow context, indentation is ignored. We make the scanner less + # restrictive then specification requires. + if self.flow_level: + return + + # In block context, we may need to issue the BLOCK-END tokens. + while self.indent > column: + mark = self.get_mark() + self.indent = self.indents.pop() + self.tokens.append(BlockEndToken(mark, mark)) + + def add_indent(self, column): + # Check if we need to increase indentation. + if self.indent < column: + self.indents.append(self.indent) + self.indent = column + return True + return False + + # Fetchers. + + def fetch_stream_start(self): + # We always add STREAM-START as the first token and STREAM-END as the + # last token. + + # Read the token. + mark = self.get_mark() + + # Add STREAM-START. + self.tokens.append(StreamStartToken(mark, mark, + encoding=self.encoding)) + + + def fetch_stream_end(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + self.possible_simple_keys = {} + + # Read the token. + mark = self.get_mark() + + # Add STREAM-END. + self.tokens.append(StreamEndToken(mark, mark)) + + # The steam is finished. 
+ self.done = True + + def fetch_directive(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Scan and add DIRECTIVE. + self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END. + start_mark = self.get_mark() + self.forward(3) + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_start(self): + self.fetch_flow_collection_start(FlowSequenceStartToken) + + def fetch_flow_mapping_start(self): + self.fetch_flow_collection_start(FlowMappingStartToken) + + def fetch_flow_collection_start(self, TokenClass): + + # '[' and '{' may start a simple key. + self.save_possible_simple_key() + + # Increase the flow level. + self.flow_level += 1 + + # Simple keys are allowed after '[' and '{'. + self.allow_simple_key = True + + # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_end(self): + self.fetch_flow_collection_end(FlowSequenceEndToken) + + def fetch_flow_mapping_end(self): + self.fetch_flow_collection_end(FlowMappingEndToken) + + def fetch_flow_collection_end(self, TokenClass): + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Decrease the flow level. + self.flow_level -= 1 + + # No simple keys after ']' or '}'. 
+ self.allow_simple_key = False + + # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_entry(self): + + # Simple keys are allowed after ','. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add FLOW-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError(None, None, + "sequence entries are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-SEQUENCE-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + + # Simple keys are allowed after '-'. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not nessesary a simple)? + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping keys are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-MAPPING-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. 
+ self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert(key.token_number-self.tokens_taken, + KeyToken(key.mark, key.mark)) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert(key.token_number-self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark)) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be catched by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping values are not allowed here", + self.get_mark()) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. 
+ start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + + # ALIAS could be a simple key. + self.save_possible_simple_key() + + # No simple keys after ALIAS. + self.allow_simple_key = False + + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + + # ANCHOR could start a simple key. + self.save_possible_simple_key() + + # No simple keys after ANCHOR. + self.allow_simple_key = False + + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + + # TAG could start a simple key. + self.save_possible_simple_key() + + # No simple keys after TAG. + self.allow_simple_key = False + + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + + # A simple key may follow a block scalar. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + self.fetch_flow_scalar(style='\'') + + def fetch_double(self): + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + + # A flow scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after flow scalars. + self.allow_simple_key = False + + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + + # A plain scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + + # Scan and add SCALAR. 
May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.column == 0: + return True + + def check_document_start(self): + + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'---' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_document_end(self): + + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'...' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_block_entry(self): + + # BLOCK-ENTRY: '-' (' '|'\n') + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_key(self): + + # KEY(flow context): '?' + if self.flow_level: + return True + + # KEY(block context): '?' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_value(self): + + # VALUE(flow context): ':' + if self.flow_level: + return True + + # VALUE(block context): ':' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_plain(self): + + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. + # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + ch = self.peek() + return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ + or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029' + and (ch == u'-' or (not self.flow_level and ch in u'?:'))) + + # Scanners. + + def scan_to_next_token(self): + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. 
+ # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + + if self.index == 0 and self.peek() == u'\uFEFF': + self.forward() + found = False + while not found: + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + + def scan_directive(self): + # See the specification for details. + start_mark = self.get_mark() + self.forward() + name = self.scan_directive_name(start_mark) + value = None + if name == u'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.get_mark() + elif name == u'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.get_mark() + else: + end_mark = self.get_mark() + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # See the specification for details. 
+ length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return value + + def scan_yaml_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + major = self.scan_yaml_directive_number(start_mark) + if self.peek() != '.': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or '.', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + minor = self.scan_yaml_directive_number(start_mark) + if self.peek() not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or ' ', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + return (major, minor) + + def scan_yaml_directive_number(self, start_mark): + # See the specification for details. + ch = self.peek() + if not (u'0' <= ch <= u'9'): + raise ScannerError("while scanning a directive", start_mark, + "expected a digit, but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 0 + while u'0' <= self.peek(length) <= u'9': + length += 1 + value = int(self.prefix(length)) + self.forward(length) + return value + + def scan_tag_directive_value(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + handle = self.scan_tag_directive_handle(start_mark) + while self.peek() == u' ': + self.forward() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.peek() + if ch != u' ': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_tag_directive_prefix(self, start_mark): + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_directive_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpteted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. 
+ start_mark = self.get_mark() + indicator = self.peek() + if indicator == u'*': + name = 'alias' + else: + name = 'anchor' + self.forward() + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`': + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + end_mark = self.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # See the specification for details. + start_mark = self.get_mark() + ch = self.peek(1) + if ch == u'<': + handle = None + self.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if self.peek() != u'>': + raise ScannerError("while parsing a tag", start_mark, + "expected '>', but found %r" % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + elif ch in u'\0 \t\r\n\x85\u2028\u2029': + handle = None + suffix = u'!' + self.forward() + else: + length = 1 + use_handle = False + while ch not in u'\0 \r\n\x85\u2028\u2029': + if ch == u'!': + use_handle = True + break + length += 1 + ch = self.peek(length) + handle = u'!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = u'!' 
+ self.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a tag", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + value = (handle, suffix) + end_mark = self.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style): + # See the specification for details. + + if style == '>': + folded = True + else: + folded = False + + chunks = [] + start_mark = self.get_mark() + + # Scan the header. + self.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. + min_indent = self.indent+1 + if min_indent < 1: + min_indent = 1 + if increment is None: + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + indent = min_indent+increment-1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = u'' + + # Scan the inner part of the block scalar. + while self.column == indent and self.peek() != u'\0': + chunks.extend(breaks) + leading_non_space = self.peek() not in u' \t' + length = 0 + while self.peek(length) not in u'\0\r\n\x85\u2028\u2029': + length += 1 + chunks.append(self.prefix(length)) + self.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if self.column == indent and self.peek() != u'\0': + + # Unfortunately, folding rules are ambiguous. 
+ # + # This is the folding according to the specification: + + if folded and line_break == u'\n' \ + and leading_non_space and self.peek() not in u' \t': + if not breaks: + chunks.append(u' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + #if folded and line_break == u'\n': + # if not breaks: + # if self.peek() not in ' \t': + # chunks.append(u' ') + # else: + # chunks.append(line_break) + #else: + # chunks.append(line_break) + else: + break + + # Chomp the tail. + if chomping is not False: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + + # We are done. + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + def scan_block_scalar_indicators(self, start_mark): + # See the specification for details. + chomping = None + increment = None + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + elif ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected chomping or indentation indicators, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_block_scalar_indentation(self): + # See the specification for details. + chunks = [] + max_indent = 0 + end_mark = self.get_mark() + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() != u' ': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + else: + self.forward() + if self.column > max_indent: + max_indent = self.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # See the specification for details. + chunks = [] + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + while self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + return chunks, end_mark + + def scan_flow_scalar(self, style): + # See the specification for details. + # Note that we loose indentation rules for quoted scalars. Quoted + # scalars don't need to adhere indentation because " and ' clearly + # mark the beginning and the end of them. Therefore we are less + # restrictive then the specification requires. We only need to check + # that document separators are not included in scalars. 
+ if style == '"': + double = True + else: + double = False + chunks = [] + start_mark = self.get_mark() + quote = self.peek() + self.forward() + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + while self.peek() != quote: + chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + self.forward() + end_mark = self.get_mark() + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + ESCAPE_REPLACEMENTS = { + u'0': u'\0', + u'a': u'\x07', + u'b': u'\x08', + u't': u'\x09', + u'\t': u'\x09', + u'n': u'\x0A', + u'v': u'\x0B', + u'f': u'\x0C', + u'r': u'\x0D', + u'e': u'\x1B', + u' ': u'\x20', + u'\"': u'\"', + u'\\': u'\\', + u'N': u'\x85', + u'_': u'\xA0', + u'L': u'\u2028', + u'P': u'\u2029', + } + + ESCAPE_CODES = { + u'x': 2, + u'u': 4, + u'U': 8, + } + + def scan_flow_scalar_non_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + length = 0 + while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029': + length += 1 + if length: + chunks.append(self.prefix(length)) + self.forward(length) + ch = self.peek() + if not double and ch == u'\'' and self.peek(1) == u'\'': + chunks.append(u'\'') + self.forward(2) + elif (double and ch == u'\'') or (not double and ch in u'\"\\'): + chunks.append(ch) + self.forward() + elif double and ch == u'\\': + self.forward() + ch = self.peek() + if ch in self.ESCAPE_REPLACEMENTS: + chunks.append(self.ESCAPE_REPLACEMENTS[ch]) + self.forward() + elif ch in self.ESCAPE_CODES: + length = self.ESCAPE_CODES[ch] + self.forward() + for k in range(length): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "expected escape sequence of %d hexdecimal numbers, but found %r" % + (length, self.peek(k).encode('utf-8')), self.get_mark()) + code = int(self.prefix(length), 16) + 
chunks.append(unichr(code)) + self.forward(length) + elif ch in u'\r\n\x85\u2028\u2029': + self.scan_line_break() + chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) + else: + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark()) + else: + return chunks + + def scan_flow_scalar_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + length = 0 + while self.peek(length) in u' \t': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch == u'\0': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected end of stream", self.get_mark()) + elif ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + breaks = self.scan_flow_scalar_breaks(double, start_mark) + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + else: + chunks.append(whitespaces) + return chunks + + def scan_flow_scalar_breaks(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + # Instead of checking indentation, we check for document + # separators. + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected document separator", self.get_mark()) + while self.peek() in u' \t': + self.forward() + if self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + else: + return chunks + + def scan_plain(self): + # See the specification for details. + # We add an additional restriction for the flow context: + # plain scalars in the flow context cannot contain ',', ':' and '?'. + # We also keep track of the `allow_simple_key` flag here. + # Indentation rules are loosed for the flow context. 
+ chunks = [] + start_mark = self.get_mark() + end_mark = start_mark + indent = self.indent+1 + # We allow zero indentation for scalars, but then we need to check for + # document separators at the beginning of the line. + #if indent == 0: + # indent = 1 + spaces = [] + while True: + length = 0 + if self.peek() == u'#': + break + while True: + ch = self.peek(length) + if ch in u'\0 \t\r\n\x85\u2028\u2029' \ + or (not self.flow_level and ch == u':' and + self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \ + or (self.flow_level and ch in u',:?[]{}'): + break + length += 1 + # It's not clear what we should do with ':' in the flow context. + if (self.flow_level and ch == u':' + and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'): + self.forward(length) + raise ScannerError("while scanning a plain scalar", start_mark, + "found unexpected ':'", self.get_mark(), + "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.") + if length == 0: + break + self.allow_simple_key = False + chunks.extend(spaces) + chunks.append(self.prefix(length)) + self.forward(length) + end_mark = self.get_mark() + spaces = self.scan_plain_spaces(indent, start_mark) + if not spaces or self.peek() == u'#' \ + or (not self.flow_level and self.column < indent): + break + return ScalarToken(u''.join(chunks), True, start_mark, end_mark) + + def scan_plain_spaces(self, indent, start_mark): + # See the specification for details. + # The specification is really confusing about tabs in plain scalars. + # We just forbid them completely. Do not use tabs in YAML! 
+ chunks = [] + length = 0 + while self.peek(length) in u' ': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + self.allow_simple_key = True + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + breaks = [] + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() == ' ': + self.forward() + else: + breaks.append(self.scan_line_break()) + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + elif whitespaces: + chunks.append(whitespaces) + return chunks + + def scan_tag_handle(self, name, start_mark): + # See the specification for details. + # For some strange reasons, the specification does not allow '_' in + # tag handles. I have allowed it anyway. + ch = self.peek() + if ch != u'!': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 1 + ch = self.peek(length) + if ch != u' ': + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if ch != u'!': + self.forward(length) + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length += 1 + value = self.prefix(length) + self.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # See the specification for details. + # Note: we do not check if URI is well-formed. 
+ chunks = [] + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.!~*\'()[]%': + if ch == u'%': + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = self.peek(length) + if length: + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + if not chunks: + raise ScannerError("while parsing a %s" % name, start_mark, + "expected URI, but found %r" % ch.encode('utf-8'), + self.get_mark()) + return u''.join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # See the specification for details. + bytes = [] + mark = self.get_mark() + while self.peek() == u'%': + self.forward() + for k in range(2): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected URI escape sequence of 2 hexdecimal numbers, but found %r" % + (self.peek(k).encode('utf-8')), self.get_mark()) + bytes.append(chr(int(self.prefix(2), 16))) + self.forward(2) + try: + value = unicode(''.join(bytes), 'utf-8') + except UnicodeDecodeError, exc: + raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) + return value + + def scan_line_break(self): + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.peek() + if ch in u'\r\n\x85': + if self.prefix(2) == u'\r\n': + self.forward(2) + else: + self.forward() + return u'\n' + elif ch in u'\u2028\u2029': + self.forward() + return ch + return u'' + +#try: +# import psyco +# psyco.bind(Scanner) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/6.0.0/yaml/serializer.py b/scripts/clang-tidy/6.0.0/yaml/serializer.py new file mode 100644 index 000000000..0bf1e96dc --- /dev/null +++ b/scripts/clang-tidy/6.0.0/yaml/serializer.py @@ -0,0 +1,111 @@ + 
+__all__ = ['Serializer', 'SerializerError'] + +from error import YAMLError +from events import * +from nodes import * + +class SerializerError(YAMLError): + pass + +class Serializer(object): + + ANCHOR_TEMPLATE = u'id%03d' + + def __init__(self, encoding=None, + explicit_start=None, explicit_end=None, version=None, tags=None): + self.use_encoding = encoding + self.use_explicit_start = explicit_start + self.use_explicit_end = explicit_end + self.use_version = version + self.use_tags = tags + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + self.closed = None + + def open(self): + if self.closed is None: + self.emit(StreamStartEvent(encoding=self.use_encoding)) + self.closed = False + elif self.closed: + raise SerializerError("serializer is closed") + else: + raise SerializerError("serializer is already opened") + + def close(self): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif not self.closed: + self.emit(StreamEndEvent()) + self.closed = True + + #def __del__(self): + # self.close() + + def serialize(self, node): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif self.closed: + raise SerializerError("serializer is closed") + self.emit(DocumentStartEvent(explicit=self.use_explicit_start, + version=self.use_version, tags=self.use_tags)) + self.anchor_node(node) + self.serialize_node(node, None, None) + self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + + def anchor_node(self, node): + if node in self.anchors: + if self.anchors[node] is None: + self.anchors[node] = self.generate_anchor(node) + else: + self.anchors[node] = None + if isinstance(node, SequenceNode): + for item in node.value: + self.anchor_node(item) + elif isinstance(node, MappingNode): + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) + + def generate_anchor(self, node): + 
self.last_anchor_id += 1 + return self.ANCHOR_TEMPLATE % self.last_anchor_id + + def serialize_node(self, node, parent, index): + alias = self.anchors[node] + if node in self.serialized_nodes: + self.emit(AliasEvent(alias)) + else: + self.serialized_nodes[node] = True + self.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + detected_tag = self.resolve(ScalarNode, node.value, (True, False)) + default_tag = self.resolve(ScalarNode, node.value, (False, True)) + implicit = (node.tag == detected_tag), (node.tag == default_tag) + self.emit(ScalarEvent(alias, node.tag, implicit, node.value, + style=node.style)) + elif isinstance(node, SequenceNode): + implicit = (node.tag + == self.resolve(SequenceNode, node.value, True)) + self.emit(SequenceStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emit(SequenceEndEvent()) + elif isinstance(node, MappingNode): + implicit = (node.tag + == self.resolve(MappingNode, node.value, True)) + self.emit(MappingStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + self.emit(MappingEndEvent()) + self.ascend_resolver() + diff --git a/scripts/clang-tidy/6.0.0/yaml/tokens.py b/scripts/clang-tidy/6.0.0/yaml/tokens.py new file mode 100644 index 000000000..4d0b48a39 --- /dev/null +++ b/scripts/clang-tidy/6.0.0/yaml/tokens.py @@ -0,0 +1,104 @@ + +class Token(object): + def __init__(self, start_mark, end_mark): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in self.__dict__ + if not key.endswith('_mark')] + attributes.sort() + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +#class BOMToken(Token): +# id = '' + +class 
DirectiveToken(Token): + id = '' + def __init__(self, name, value, start_mark, end_mark): + self.name = name + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class DocumentStartToken(Token): + id = '' + +class DocumentEndToken(Token): + id = '' + +class StreamStartToken(Token): + id = '' + def __init__(self, start_mark=None, end_mark=None, + encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndToken(Token): + id = '' + +class BlockSequenceStartToken(Token): + id = '' + +class BlockMappingStartToken(Token): + id = '' + +class BlockEndToken(Token): + id = '' + +class FlowSequenceStartToken(Token): + id = '[' + +class FlowMappingStartToken(Token): + id = '{' + +class FlowSequenceEndToken(Token): + id = ']' + +class FlowMappingEndToken(Token): + id = '}' + +class KeyToken(Token): + id = '?' + +class ValueToken(Token): + id = ':' + +class BlockEntryToken(Token): + id = '-' + +class FlowEntryToken(Token): + id = ',' + +class AliasToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class AnchorToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class TagToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class ScalarToken(Token): + id = '' + def __init__(self, value, plain, start_mark, end_mark, style=None): + self.value = value + self.plain = plain + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + diff --git a/scripts/clang-tidy/6.0.1/.travis.yml b/scripts/clang-tidy/6.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang-tidy/6.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: 
+ include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang-tidy/6.0.1/README-yaml.md b/scripts/clang-tidy/6.0.1/README-yaml.md new file mode 100644 index 000000000..2cc738ab7 --- /dev/null +++ b/scripts/clang-tidy/6.0.1/README-yaml.md @@ -0,0 +1,13 @@ +This is a copy of `pyyaml-3.12` vendored on april 24, 2018 by @springmeyer. + +https://github.com/mapbox/mason/issues/563 documents why. + +The process to vendor was: + +``` +cd mason +pip install pyyaml --user +cp $(python -m site --user-site)/yaml scripts/clang-tidy/6.0.0/ +``` + +Then the `clang-tidy` package was built and the `yaml` directory was copied beside the `share/run-clang-tidy.py` script (which depends on it). \ No newline at end of file diff --git a/scripts/clang-tidy/6.0.1/script.sh b/scripts/clang-tidy/6.0.1/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang-tidy/6.0.1/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang-tidy/6.0.1/yaml/__init__.py b/scripts/clang-tidy/6.0.1/yaml/__init__.py new file mode 100644 index 000000000..87c15d38a --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/__init__.py @@ -0,0 +1,315 @@ + +from error import * + +from tokens import * +from events import * +from nodes import * + +from loader import * +from dumper import * + +__version__ = '3.12' + +try: + from cyaml import * + __with_libyaml__ = True +except ImportError: + __with_libyaml__ = False + +def scan(stream, Loader=Loader): + """ + Scan a YAML stream and produce scanning tokens. + """ + loader = Loader(stream) + try: + while loader.check_token(): + yield loader.get_token() + finally: + loader.dispose() + +def parse(stream, Loader=Loader): + """ + Parse a YAML stream and produce parsing events. + """ + loader = Loader(stream) + try: + while loader.check_event(): + yield loader.get_event() + finally: + loader.dispose() + +def compose(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + +def compose_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + loader = Loader(stream) + try: + while loader.check_node(): + yield loader.get_node() + finally: + loader.dispose() + +def load(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + loader = Loader(stream) + try: + return loader.get_single_data() + finally: + loader.dispose() + +def load_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. 
+ """ + loader = Loader(stream) + try: + while loader.check_data(): + yield loader.get_data() + finally: + loader.dispose() + +def safe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + return load(stream, SafeLoader) + +def safe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + return load_all(stream, SafeLoader) + +def emit(events, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + from StringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + try: + for event in events: + dumper.emit(event) + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize_all(nodes, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for node in nodes: + dumper.serialize(node) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + +def dump_all(documents, stream=None, Dumper=Dumper, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for data in documents: + dumper.represent(data) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def dump(data, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a Python object into a YAML stream. 
+ If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=Dumper, **kwds) + +def safe_dump_all(documents, stream=None, **kwds): + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + +def safe_dump(data, stream=None, **kwds): + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + +def add_implicit_resolver(tag, regexp, first=None, + Loader=Loader, Dumper=Dumper): + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + Loader.add_implicit_resolver(tag, regexp, first) + Dumper.add_implicit_resolver(tag, regexp, first) + +def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): + """ + Add a path based resolver for the given tag. + A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. + """ + Loader.add_path_resolver(tag, path, kind) + Dumper.add_path_resolver(tag, path, kind) + +def add_constructor(tag, constructor, Loader=Loader): + """ + Add a constructor for the given tag. + Constructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. + """ + Loader.add_constructor(tag, constructor) + +def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. 
+ Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. + """ + Loader.add_multi_constructor(tag_prefix, multi_constructor) + +def add_representer(data_type, representer, Dumper=Dumper): + """ + Add a representer for the given type. + Representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. + """ + Dumper.add_representer(data_type, representer) + +def add_multi_representer(data_type, multi_representer, Dumper=Dumper): + """ + Add a representer for the given type. + Multi-representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. + """ + Dumper.add_multi_representer(data_type, multi_representer) + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. + """ + def __init__(cls, name, bases, kwds): + super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + cls.yaml_dumper.add_representer(cls, cls.to_yaml) + +class YAMLObject(object): + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. + """ + + __metaclass__ = YAMLObjectMetaclass + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_loader = Loader + yaml_dumper = Dumper + + yaml_tag = None + yaml_flow_style = None + + def from_yaml(cls, loader, node): + """ + Convert a representation node to a Python object. + """ + return loader.construct_yaml_object(node, cls) + from_yaml = classmethod(from_yaml) + + def to_yaml(cls, dumper, data): + """ + Convert a Python object to a representation node. 
+ """ + return dumper.represent_yaml_object(cls.yaml_tag, data, cls, + flow_style=cls.yaml_flow_style) + to_yaml = classmethod(to_yaml) + diff --git a/scripts/clang-tidy/6.0.1/yaml/composer.py b/scripts/clang-tidy/6.0.1/yaml/composer.py new file mode 100644 index 000000000..06e5ac782 --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/composer.py @@ -0,0 +1,139 @@ + +__all__ = ['Composer', 'ComposerError'] + +from error import MarkedYAMLError +from events import * +from nodes import * + +class ComposerError(MarkedYAMLError): + pass + +class Composer(object): + + def __init__(self): + self.anchors = {} + + def check_node(self): + # Drop the STREAM-START event. + if self.check_event(StreamStartEvent): + self.get_event() + + # If there are more documents available? + return not self.check_event(StreamEndEvent) + + def get_node(self): + # Get the root node of the next document. + if not self.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # Drop the STREAM-START event. + self.get_event() + + # Compose a document if the stream is not empty. + document = None + if not self.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.check_event(StreamEndEvent): + event = self.get_event() + raise ComposerError("expected a single document in the stream", + document.start_mark, "but found another document", + event.start_mark) + + # Drop the STREAM-END event. + self.get_event() + + return document + + def compose_document(self): + # Drop the DOCUMENT-START event. + self.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. 
+ self.get_event() + + self.anchors = {} + return node + + def compose_node(self, parent, index): + if self.check_event(AliasEvent): + event = self.get_event() + anchor = event.anchor + if anchor not in self.anchors: + raise ComposerError(None, None, "found undefined alias %r" + % anchor.encode('utf-8'), event.start_mark) + return self.anchors[anchor] + event = self.peek_event() + anchor = event.anchor + if anchor is not None: + if anchor in self.anchors: + raise ComposerError("found duplicate anchor %r; first occurence" + % anchor.encode('utf-8'), self.anchors[anchor].start_mark, + "second occurence", event.start_mark) + self.descend_resolver(parent, index) + if self.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + event = self.get_event() + tag = event.tag + if tag is None or tag == u'!': + tag = self.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode(tag, event.value, + event.start_mark, event.end_mark, style=event.style) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + + def compose_mapping_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': 
+ tag = self.resolve(MappingNode, None, start_event.implicit) + node = MappingNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + while not self.check_event(MappingEndEvent): + #key_event = self.peek_event() + item_key = self.compose_node(node, None) + #if item_key in node.value: + # raise ComposerError("while composing a mapping", start_event.start_mark, + # "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + #node.value[item_key] = item_value + node.value.append((item_key, item_value)) + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + diff --git a/scripts/clang-tidy/6.0.1/yaml/constructor.py b/scripts/clang-tidy/6.0.1/yaml/constructor.py new file mode 100644 index 000000000..635faac3e --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/constructor.py @@ -0,0 +1,675 @@ + +__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', + 'ConstructorError'] + +from error import * +from nodes import * + +import datetime + +import binascii, re, sys, types + +class ConstructorError(MarkedYAMLError): + pass + +class BaseConstructor(object): + + yaml_constructors = {} + yaml_multi_constructors = {} + + def __init__(self): + self.constructed_objects = {} + self.recursive_objects = {} + self.state_generators = [] + self.deep_construct = False + + def check_data(self): + # If there are more documents available? + return self.check_node() + + def get_data(self): + # Construct and return the next document. + if self.check_node(): + return self.construct_document(self.get_node()) + + def get_single_data(self): + # Ensure that the stream contains a single document and construct it. 
+ node = self.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + data = self.construct_object(node) + while self.state_generators: + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + raise ConstructorError(None, None, + "found unconstructable recursive node", node.start_mark) + self.recursive_objects[node] = None + constructor = None + tag_suffix = None + if node.tag in self.yaml_constructors: + constructor = self.yaml_constructors[node.tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if node.tag.startswith(tag_prefix): + tag_suffix = node.tag[len(tag_prefix):] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = node.tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = generator.next() + if self.deep_construct: + for dummy in generator: + pass + else: + self.state_generators.append(generator) + self.constructed_objects[node] = data + del 
self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_scalar(self, node): + if not isinstance(node, ScalarNode): + raise ConstructorError(None, None, + "expected a scalar node, but found %s" % node.id, + node.start_mark) + return node.value + + def construct_sequence(self, node, deep=False): + if not isinstance(node, SequenceNode): + raise ConstructorError(None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + return [self.construct_object(child, deep=deep) + for child in node.value] + + def construct_mapping(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except TypeError, exc: + raise ConstructorError("while constructing a mapping", node.start_mark, + "found unacceptable key (%s)" % exc, key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + def construct_pairs(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + def add_constructor(cls, tag, constructor): + if not 'yaml_constructors' in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + add_constructor = classmethod(add_constructor) + + def add_multi_constructor(cls, tag_prefix, multi_constructor): + if not 'yaml_multi_constructors' in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + 
cls.yaml_multi_constructors[tag_prefix] = multi_constructor + add_multi_constructor = classmethod(add_multi_constructor) + +class SafeConstructor(BaseConstructor): + + def construct_scalar(self, node): + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + + def construct_yaml_null(self, node): + self.construct_scalar(node) + return None + + bool_values = { + u'yes': True, + u'no': False, + u'true': True, + u'false': False, + u'on': True, + u'off': False, + } + + def construct_yaml_bool(self, node): + value = 
self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '') + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '0': + return 0 + elif value.startswith('0b'): + return sign*int(value[2:], 2) + elif value.startswith('0x'): + return sign*int(value[2:], 16) + elif value[0] == '0': + return sign*int(value, 8) + elif ':' in value: + digits = [int(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*int(value) + + inf_value = 1e300 + while inf_value != inf_value*inf_value: + inf_value *= inf_value + nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). + + def construct_yaml_float(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '').lower() + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '.inf': + return sign*self.inf_value + elif value == '.nan': + return self.nan_value + elif ':' in value: + digits = [float(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*float(value) + + def construct_yaml_binary(self, node): + value = self.construct_scalar(node) + try: + return str(value).decode('base64') + except (binascii.Error, UnicodeEncodeError), exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + timestamp_regexp = re.compile( + ur'''^(?P[0-9][0-9][0-9][0-9]) + -(?P[0-9][0-9]?) + -(?P[0-9][0-9]?) + (?:(?:[Tt]|[ \t]+) + (?P[0-9][0-9]?) + :(?P[0-9][0-9]) + :(?P[0-9][0-9]) + (?:\.(?P[0-9]*))? + (?:[ \t]*(?PZ|(?P[-+])(?P[0-9][0-9]?) 
+ (?::(?P[0-9][0-9]))?))?)?$''', re.X) + + def construct_yaml_timestamp(self, node): + value = self.construct_scalar(node) + match = self.timestamp_regexp.match(node.value) + values = match.groupdict() + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + if not values['hour']: + return datetime.date(year, month, day) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction = values['fraction'][:6] + while len(fraction) < 6: + fraction += '0' + fraction = int(fraction) + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + tz_minute = int(values['tz_minute'] or 0) + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data + + def construct_yaml_omap(self, node): + # Note: we do not check for duplicate keys, because it's too + # CPU-expensive. + omap = [] + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + omap.append((key, value)) + + def construct_yaml_pairs(self, node): + # Note: the same code as `construct_yaml_omap`. 
+ pairs = [] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + data = set() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + value = self.construct_scalar(node) + try: + return value.encode('ascii') + except UnicodeEncodeError: + return value + + def construct_yaml_seq(self, node): + data = [] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + data = {} + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + raise ConstructorError(None, None, + "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'), + node.start_mark) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:null', + SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:bool', + SafeConstructor.construct_yaml_bool) + 
+SafeConstructor.add_constructor( + u'tag:yaml.org,2002:int', + SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:float', + SafeConstructor.construct_yaml_float) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:binary', + SafeConstructor.construct_yaml_binary) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:omap', + SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', + SafeConstructor.construct_yaml_pairs) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:set', + SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:str', + SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:seq', + SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:map', + SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, + SafeConstructor.construct_undefined) + +class Constructor(SafeConstructor): + + def construct_python_str(self, node): + return self.construct_scalar(node).encode('utf-8') + + def construct_python_unicode(self, node): + return self.construct_scalar(node) + + def construct_python_long(self, node): + return long(self.construct_yaml_int(node)) + + def construct_python_complex(self, node): + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + return tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python module", mark, + "expected non-empty name appended to the tag", mark) + try: + __import__(name) + except ImportError, exc: + raise ConstructorError("while constructing a Python module", mark, + "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark) + return 
sys.modules[name] + + def find_python_name(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python object", mark, + "expected non-empty name appended to the tag", mark) + if u'.' in name: + module_name, object_name = name.rsplit('.', 1) + else: + module_name = '__builtin__' + object_name = name + try: + __import__(module_name) + except ImportError, exc: + raise ConstructorError("while constructing a Python object", mark, + "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark) + module = sys.modules[module_name] + if not hasattr(module, object_name): + raise ConstructorError("while constructing a Python object", mark, + "cannot find %r in the module %r" % (object_name.encode('utf-8'), + module.__name__), mark) + return getattr(module, object_name) + + def construct_python_name(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python name", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python module", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_module(suffix, node.start_mark) + + class classobj: pass + + def make_python_instance(self, suffix, node, + args=None, kwds=None, newobj=False): + if not args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if newobj and isinstance(cls, type(self.classobj)) \ + and not args and not kwds: + instance = self.classobj() + instance.__class__ = cls + return instance + elif newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, 
instance, state): + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(object, key, value) + + def construct_python_object(self, suffix, node): + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. 
+ if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} + state = {} + listitems = [] + dictitems = {} + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if state: + self.set_python_instance_state(instance, state) + if listitems: + instance.extend(listitems) + if dictitems: + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + return self.construct_python_object_apply(suffix, node, newobj=True) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/none', + Constructor.construct_yaml_null) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/bool', + Constructor.construct_yaml_bool) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/str', + Constructor.construct_python_str) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/unicode', + Constructor.construct_python_unicode) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/int', + Constructor.construct_yaml_int) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/long', + Constructor.construct_python_long) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/float', + Constructor.construct_yaml_float) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/complex', + Constructor.construct_python_complex) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/list', + Constructor.construct_yaml_seq) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/tuple', + Constructor.construct_python_tuple) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/dict', + Constructor.construct_yaml_map) + +Constructor.add_multi_constructor( + 
u'tag:yaml.org,2002:python/name:', + Constructor.construct_python_name) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/module:', + Constructor.construct_python_module) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object:', + Constructor.construct_python_object) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/apply:', + Constructor.construct_python_object_apply) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/new:', + Constructor.construct_python_object_new) + diff --git a/scripts/clang-tidy/6.0.1/yaml/cyaml.py b/scripts/clang-tidy/6.0.1/yaml/cyaml.py new file mode 100644 index 000000000..68dcd7519 --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/cyaml.py @@ -0,0 +1,85 @@ + +__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', + 'CBaseDumper', 'CSafeDumper', 'CDumper'] + +from _yaml import CParser, CEmitter + +from constructor import * + +from serializer import * +from representer import * + +from resolver import * + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class CSafeLoader(CParser, SafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class CLoader(CParser, Constructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + Constructor.__init__(self) + Resolver.__init__(self) + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + 
allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CDumper(CEmitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/6.0.1/yaml/dumper.py b/scripts/clang-tidy/6.0.1/yaml/dumper.py new file mode 100644 index 000000000..f811d2c91 --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/dumper.py @@ -0,0 +1,62 @@ + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] + +from emitter import * +from serializer import * +from representer import * +from resolver 
import * + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class Dumper(Emitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + 
version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/6.0.1/yaml/emitter.py b/scripts/clang-tidy/6.0.1/yaml/emitter.py new file mode 100644 index 000000000..e5bcdcccb --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/emitter.py @@ -0,0 +1,1140 @@ + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +__all__ = ['Emitter', 'EmitterError'] + +from error import YAMLError +from events import * + +class EmitterError(YAMLError): + pass + +class ScalarAnalysis(object): + def __init__(self, scalar, empty, multiline, + allow_flow_plain, allow_block_plain, + allow_single_quoted, allow_double_quoted, + allow_block): + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + +class Emitter(object): + + DEFAULT_TAG_PREFIXES = { + u'!' : u'!', + u'tag:yaml.org,2002:' : u'!!', + } + + def __init__(self, stream, canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + + # The stream should have the methods `write` and possibly `flush`. + self.stream = stream + + # Encoding can be overriden by STREAM-START. + self.encoding = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] + self.state = self.expect_stream_start + + # Current event and the event queue. + self.events = [] + self.event = None + + # The current indentation level and the stack of previous indents. 
+ self.indents = [] + self.indent = None + + # Flow level. + self.flow_level = 0 + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + + # Whether the document requires an explicit document indicator + self.open_ended = False + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + self.best_indent = 2 + if indent and 1 < indent < 10: + self.best_indent = indent + self.best_width = 80 + if width and width > self.best_indent*2: + self.best_width = width + self.best_line_break = u'\n' + if line_break in [u'\r', u'\n', u'\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None + + # Prepared anchor and tag. + self.prepared_anchor = None + self.prepared_tag = None + + # Scalar analysis and style. + self.analysis = None + self.style = None + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. 
+ + def need_more_events(self): + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return (len(self.events) < count+1) + + def increase_indent(self, flow=False, indentless=False): + self.indents.append(self.indent) + if self.indent is None: + if flow: + self.indent = self.best_indent + else: + self.indent = 0 + elif not indentless: + self.indent += self.best_indent + + # States. + + # Stream handlers. + + def expect_stream_start(self): + if isinstance(self.event, StreamStartEvent): + if self.event.encoding and not getattr(self.stream, 'encoding', None): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError("expected StreamStartEvent, but got %s" + % self.event) + + def expect_nothing(self): + raise EmitterError("expected nothing, but got %s" % self.event) + + # Document handlers. 
+ + def expect_first_document_start(self): + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = self.event.tags.keys() + handles.sort() + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = (first and not self.event.explicit and not self.canonical + and not self.event.version and not self.event.tags + and not self.check_empty_document()) + if not implicit: + self.write_indent() + self.write_indicator(u'---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError("expected DocumentStartEvent, but got %s" + % self.event) + + def expect_document_end(self): + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator(u'...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError("expected DocumentEndEvent, but got %s" + % self.event) + + def expect_document_root(self): + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. 
+ + def expect_node(self, root=False, sequence=False, mapping=False, + simple_key=False): + self.root_context = root + self.sequence_context = sequence + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + self.process_anchor(u'&') + self.process_tag() + if isinstance(self.event, ScalarEvent): + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_sequence(): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_mapping(): + self.expect_flow_mapping() + else: + self.expect_block_mapping() + else: + raise EmitterError("expected NodeEvent, but got %s" % self.event) + + def expect_alias(self): + if self.event.anchor is None: + raise EmitterError("anchor is not specified for alias") + self.process_anchor(u'*') + self.state = self.states.pop() + + def expect_scalar(self): + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
+ + def expect_flow_sequence(self): + self.write_indicator(u'[', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. 
+ + def expect_flow_mapping(self): + self.write_indicator(u'{', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(u':', True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. 
+ + def expect_block_sequence(self): + indentless = (self.mapping_context and not self.indention) + self.increase_indent(flow=False, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + if not first and isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + self.write_indicator(u'-', True, indention=True) + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. + + def expect_block_mapping(self): + self.increase_indent(flow=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + if not first and isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + if self.check_simple_key(): + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + self.write_indent() + self.write_indicator(u':', True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. 
+ + def check_empty_sequence(self): + return (isinstance(self.event, SequenceStartEvent) and self.events + and isinstance(self.events[0], SequenceEndEvent)) + + def check_empty_mapping(self): + return (isinstance(self.event, MappingStartEvent) and self.events + and isinstance(self.events[0], MappingEndEvent)) + + def check_empty_document(self): + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return (isinstance(event, ScalarEvent) and event.anchor is None + and event.tag is None and event.implicit and event.value == u'') + + def check_simple_key(self): + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ + and self.event.tag is not None: + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return (length < 128 and (isinstance(self.event, AliasEvent) + or (isinstance(self.event, ScalarEvent) + and not self.analysis.empty and not self.analysis.multiline) + or self.check_empty_sequence() or self.check_empty_mapping())) + + # Anchor, Tag, and Scalar processors. 
+ + def process_anchor(self, indicator): + if self.event.anchor is None: + self.prepared_anchor = None + return + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator+self.prepared_anchor, True) + self.prepared_anchor = None + + def process_tag(self): + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ((not self.canonical or tag is None) and + ((self.style == '' and self.event.implicit[0]) + or (self.style != '' and self.event.implicit[1]))): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = u'!' + self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError("tag is not specified") + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + self.prepared_tag = None + + def choose_scalar_style(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if not self.event.style and self.event.implicit[0]: + if (not (self.simple_key_context and + (self.analysis.empty or self.analysis.multiline)) + and (self.flow_level and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain))): + return '' + if self.event.style and self.event.style in '|>': + if (not self.flow_level and not self.simple_key_context + and self.analysis.allow_block): + return self.event.style + if not self.event.style or self.event.style == '\'': + if (self.analysis.allow_single_quoted and + not (self.simple_key_context and self.analysis.multiline)): + return '\'' + return '"' + + def process_scalar(self): + if self.analysis is None: + self.analysis 
= self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = (not self.simple_key_context) + #if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == '\'': + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + + # Analyzers. + + def prepare_version(self, version): + major, minor = version + if major != 1: + raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) + return u'%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + if not handle: + raise EmitterError("tag handle must not be empty") + if handle[0] != u'!' 
or handle[-1] != u'!': + raise EmitterError("tag handle must start and end with '!': %r" + % (handle.encode('utf-8'))) + for ch in handle[1:-1]: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the tag handle: %r" + % (ch.encode('utf-8'), handle.encode('utf-8'))) + return handle + + def prepare_tag_prefix(self, prefix): + if not prefix: + raise EmitterError("tag prefix must not be empty") + chunks = [] + start = end = 0 + if prefix[0] == u'!': + end = 1 + while end < len(prefix): + ch = prefix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?!:@&=+$,_.~*\'()[]': + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(prefix[start:end]) + return u''.join(chunks) + + def prepare_tag(self, tag): + if not tag: + raise EmitterError("tag must not be empty") + if tag == u'!': + return tag + handle = None + suffix = tag + prefixes = self.tag_prefixes.keys() + prefixes.sort() + for prefix in prefixes: + if tag.startswith(prefix) \ + and (prefix == u'!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix):] + chunks = [] + start = end = 0 + while end < len(suffix): + ch = suffix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.~*\'()[]' \ + or (ch == u'!' 
and handle != u'!'): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = u''.join(chunks) + if handle: + return u'%s%s' % (handle, suffix_text) + else: + return u'!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + if not anchor: + raise EmitterError("anchor must not be empty") + for ch in anchor: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the anchor: %r" + % (ch.encode('utf-8'), anchor.encode('utf-8'))) + return anchor + + def analyze_scalar(self, scalar): + + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, + allow_flow_plain=False, allow_block_plain=True, + allow_single_quoted=True, allow_double_quoted=True, + allow_block=False) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith(u'---') or scalar.startswith(u'...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceeded_by_whitespace = True + + # Last character or followed by a whitespace. + followed_by_whitespace = (len(scalar) == 1 or + scalar[1] in u'\0 \t\r\n\x85\u2028\u2029') + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. 
+ if ch in u'#,[]{}&*!|>\'\"%@`': + flow_indicators = True + block_indicators = True + if ch in u'?:': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in u',?[]{}': + flow_indicators = True + if ch == u':': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'#' and preceeded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in u'\n\x85\u2028\u2029': + line_breaks = True + if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): + if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': + unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. + if ch == u' ': + if index == 0: + leading_space = True + if index == len(scalar)-1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in u'\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar)-1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029') + followed_by_whitespace = (index+1 >= len(scalar) or + scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029') + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. 
+ if (leading_space or leading_break + or trailing_space or trailing_break): + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if space_break or special_characters: + allow_flow_plain = allow_block_plain = \ + allow_single_quoted = allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis(scalar=scalar, + empty=False, multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block) + + # Writers. + + def flush_stream(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # Write BOM if needed. 
+ if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write(u'\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, + whitespace=False, indention=False): + if self.whitespace or not need_whitespace: + data = indicator + else: + data = u' '+indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + indent = self.indent or 0 + if not self.indention or self.column > indent \ + or (self.column == indent and not self.whitespace): + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = u' '*(indent-self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + data = u'%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + data = u'%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. 
+ + def write_single_quoted(self, text, split=True): + self.write_indicator(u'\'', True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != u' ': + if start+1 == end and self.column > self.best_width and split \ + and start != 0 and end != len(text): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'': + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == u'\'': + data = u'\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + self.write_indicator(u'\'', False) + + ESCAPE_REPLACEMENTS = { + u'\0': u'0', + u'\x07': u'a', + u'\x08': u'b', + u'\x09': u't', + u'\x0A': u'n', + u'\x0B': u'v', + u'\x0C': u'f', + u'\x0D': u'r', + u'\x1B': u'e', + u'\"': u'\"', + u'\\': u'\\', + u'\x85': u'N', + u'\xA0': u'_', + u'\u2028': u'L', + u'\u2029': u'P', + } + + def write_double_quoted(self, text, split=True): + self.write_indicator(u'"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \ + or not (u'\x20' <= ch <= u'\x7E' + or (self.allow_unicode + and (u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD'))): + if 
start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = u'\\'+self.ESCAPE_REPLACEMENTS[ch] + elif ch <= u'\xFF': + data = u'\\x%02X' % ord(ch) + elif ch <= u'\uFFFF': + data = u'\\u%04X' % ord(ch) + else: + data = u'\\U%08X' % ord(ch) + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end+1 + if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \ + and self.column+(end-start) > self.best_width and split: + data = text[start:end]+u'\\' + if start < end: + start = end + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == u' ': + data = u'\\' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator(u'"', False) + + def determine_block_hints(self, text): + hints = u'' + if text: + if text[0] in u' \n\x85\u2028\u2029': + hints += unicode(self.best_indent) + if text[-1] not in u'\n\x85\u2028\u2029': + hints += u'-' + elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029': + hints += u'+' + return hints + + def write_folded(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'>'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if not leading_space and ch is not None and ch != u' ' \ + and text[start] == u'\n': + self.write_line_break() + leading_space = (ch == u' ') + for br in text[start:end]: + if br == u'\n': + 
self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + spaces = (ch == u' ') + end += 1 + + def write_literal(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'|'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + else: + if ch is None or ch in u'\n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + + def write_plain(self, text, split=True): + if self.root_context: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = u' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = 
text[end] + if spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + diff --git a/scripts/clang-tidy/6.0.1/yaml/error.py b/scripts/clang-tidy/6.0.1/yaml/error.py new file mode 100644 index 000000000..577686db5 --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/error.py @@ -0,0 +1,75 @@ + +__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] + +class Mark(object): + + def __init__(self, name, index, line, column, buffer, pointer): + self.name = name + self.index = index + self.line = line + self.column = column + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + if self.buffer is None: + return None + head = '' + start = self.pointer + while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer-start > max_length/2-1: + head = ' ... ' + start += 5 + break + tail = '' + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': + end += 1 + if end-self.pointer > max_length/2-1: + tail = ' ... 
' + end -= 5 + break + snippet = self.buffer[start:end].encode('utf-8') + return ' '*indent + head + snippet + tail + '\n' \ + + ' '*(indent+self.pointer-start+len(head)) + '^' + + def __str__(self): + snippet = self.get_snippet() + where = " in \"%s\", line %d, column %d" \ + % (self.name, self.line+1, self.column+1) + if snippet is not None: + where += ":\n"+snippet + return where + +class YAMLError(Exception): + pass + +class MarkedYAMLError(YAMLError): + + def __init__(self, context=None, context_mark=None, + problem=None, problem_mark=None, note=None): + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + + def __str__(self): + lines = [] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None \ + and (self.problem is None or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None: + lines.append(self.note) + return '\n'.join(lines) + diff --git a/scripts/clang-tidy/6.0.1/yaml/events.py b/scripts/clang-tidy/6.0.1/yaml/events.py new file mode 100644 index 000000000..f79ad389c --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/events.py @@ -0,0 +1,86 @@ + +# Abstract classes. 
+ +class Event(object): + def __init__(self, start_mark=None, end_mark=None): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] + if hasattr(self, key)] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +class NodeEvent(Event): + def __init__(self, anchor, start_mark=None, end_mark=None): + self.anchor = anchor + self.start_mark = start_mark + self.end_mark = end_mark + +class CollectionStartEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, + flow_style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class CollectionEndEvent(Event): + pass + +# Implementations. + +class StreamStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndEvent(Event): + pass + +class DocumentStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None, version=None, tags=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + self.version = version + self.tags = tags + +class DocumentEndEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + +class AliasEvent(NodeEvent): + pass + +class ScalarEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, value, + start_mark=None, end_mark=None, style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class SequenceStartEvent(CollectionStartEvent): + pass 
+ +class SequenceEndEvent(CollectionEndEvent): + pass + +class MappingStartEvent(CollectionStartEvent): + pass + +class MappingEndEvent(CollectionEndEvent): + pass + diff --git a/scripts/clang-tidy/6.0.1/yaml/loader.py b/scripts/clang-tidy/6.0.1/yaml/loader.py new file mode 100644 index 000000000..293ff467b --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/loader.py @@ -0,0 +1,40 @@ + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] + +from reader import * +from scanner import * +from parser import * +from composer import * +from constructor import * +from resolver import * + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/6.0.1/yaml/nodes.py b/scripts/clang-tidy/6.0.1/yaml/nodes.py new file mode 100644 index 000000000..c4f070c41 --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/nodes.py @@ -0,0 +1,49 @@ + +class Node(object): + def __init__(self, tag, value, start_mark, end_mark): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + value = self.value + #if isinstance(value, list): + # if len(value) == 0: + # value = '' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = 
'<%d items>' % len(value) + #else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... ') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + +class ScalarNode(Node): + id = 'scalar' + def __init__(self, tag, value, + start_mark=None, end_mark=None, style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class CollectionNode(Node): + def __init__(self, tag, value, + start_mark=None, end_mark=None, flow_style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class SequenceNode(CollectionNode): + id = 'sequence' + +class MappingNode(CollectionNode): + id = 'mapping' + diff --git a/scripts/clang-tidy/6.0.1/yaml/parser.py b/scripts/clang-tidy/6.0.1/yaml/parser.py new file mode 100644 index 000000000..f9e3057f3 --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/parser.py @@ -0,0 +1,589 @@ + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? 
+# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# +# FIRST sets: +# +# stream: { STREAM-START } +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR 
TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } + +__all__ = ['Parser', 'ParserError'] + +from error import MarkedYAMLError +from tokens import * +from events import * +from scanner import * + +class ParserError(MarkedYAMLError): + pass + +class Parser(object): + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + + DEFAULT_TAGS = { + u'!': u'!', + u'!!': u'tag:yaml.org,2002:', + } + + def __init__(self): + self.current_event = None + self.yaml_version = None + self.tag_handles = {} + self.states = [] + self.marks = [] + self.state = self.parse_stream_start + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def check_event(self, *choices): + # Check the type of the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # Get the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + return self.current_event + + def get_event(self): + # Get the next event and proceed further. + if self.current_event is None: + if self.state: + self.current_event = self.state() + value = self.current_event + self.current_event = None + return value + + # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END + # implicit_document ::= block_node DOCUMENT-END* + # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* + + def parse_stream_start(self): + + # Parse the stream start. + token = self.get_token() + event = StreamStartEvent(token.start_mark, token.end_mark, + encoding=token.encoding) + + # Prepare the next state. 
+ self.state = self.parse_implicit_document_start + + return event + + def parse_implicit_document_start(self): + + # Parse an implicit document. + if not self.check_token(DirectiveToken, DocumentStartToken, + StreamEndToken): + self.tag_handles = self.DEFAULT_TAGS + token = self.peek_token() + start_mark = end_mark = token.start_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=False) + + # Prepare the next state. + self.states.append(self.parse_document_end) + self.state = self.parse_block_node + + return event + + else: + return self.parse_document_start() + + def parse_document_start(self): + + # Parse any extra document end indicators. + while self.check_token(DocumentEndToken): + self.get_token() + + # Parse an explicit document. + if not self.check_token(StreamEndToken): + token = self.peek_token() + start_mark = token.start_mark + version, tags = self.process_directives() + if not self.check_token(DocumentStartToken): + raise ParserError(None, None, + "expected '', but found %r" + % self.peek_token().id, + self.peek_token().start_mark) + token = self.get_token() + end_mark = token.end_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=True, version=version, tags=tags) + self.states.append(self.parse_document_end) + self.state = self.parse_document_content + else: + # Parse the end of the stream. + token = self.get_token() + event = StreamEndEvent(token.start_mark, token.end_mark) + assert not self.states + assert not self.marks + self.state = None + return event + + def parse_document_end(self): + + # Parse the document end. + token = self.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.check_token(DocumentEndToken): + token = self.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, + explicit=explicit) + + # Prepare the next state. 
+ self.state = self.parse_document_start + + return event + + def parse_document_content(self): + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + event = self.process_empty_scalar(self.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == u'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == u'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle.encode('utf-8'), + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? 
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + if self.check_token(AliasToken): + token = self.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) + self.state = self.states.pop() + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle.encode('utf-8'), + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == u'!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.peek_token().start_mark + event = None + implicit = (tag is None or tag == u'!') + if indentless_sequence and 
self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + self.state = self.parse_indentless_sequence_entry + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == u'!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + self.state = self.states.pop() + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_sequence_first_entry + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_mapping_first_key + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_sequence_first_entry + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_mapping_first_key + elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. 
+ event = ScalarEvent(anchor, tag, (implicit, False), u'', + start_mark, end_mark) + self.state = self.states.pop() + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.peek_token() + raise ParserError("while parsing a %s node" % node, start_mark, + "expected the node content, but found %r" % token.id, + token.start_mark) + return event + + # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END + + def parse_block_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_sequence_entry() + + def parse_block_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, BlockEndToken): + self.states.append(self.parse_block_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_block_sequence_entry + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block collection", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ + + def parse_indentless_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, + KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_indentless_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_indentless_sequence_entry + return self.process_empty_scalar(token.end_mark) + token = self.peek_token() + event = SequenceEndEvent(token.start_mark, token.start_mark) + self.state = self.states.pop() + return event + + # block_mapping ::= BLOCK-MAPPING_START + # ((KEY 
block_node_or_indentless_sequence?)? + # (VALUE block_node_or_indentless_sequence?)?)* + # BLOCK-END + + def parse_block_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_mapping_key() + + def parse_block_mapping_key(self): + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_value) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block mapping", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_block_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_key) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_block_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + # flow_sequence ::= FLOW-SEQUENCE-START + # (flow_sequence_entry FLOW-ENTRY)* + # flow_sequence_entry? + # FLOW-SEQUENCE-END + # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + # + # Note that while production rules for both flow_sequence_entry and + # flow_mapping_entry are equal, their interpretations are different. + # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` + # generate an inline mapping (set syntax). 
+ + def parse_flow_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + if not self.check_token(FlowSequenceEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow sequence", self.marks[-1], + "expected ',' or ']', but got %r" % token.id, token.start_mark) + + if self.check_token(KeyToken): + token = self.peek_token() + event = MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_sequence_entry_mapping_end(self): + self.state = self.parse_flow_sequence_entry + token = self.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + + def parse_flow_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_mapping_key(first=True) + + def parse_flow_mapping_key(self, first=False): + if not self.check_token(FlowMappingEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow mapping", self.marks[-1], + "expected ',' or '}', but got %r" % token.id, token.start_mark) + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(token.end_mark) + elif not self.check_token(FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_empty_value) + return self.parse_flow_node() + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_key) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_mapping_empty_value(self): + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(self.peek_token().start_mark) + + def process_empty_scalar(self, mark): + return ScalarEvent(None, None, (True, False), u'', mark, mark) + diff --git a/scripts/clang-tidy/6.0.1/yaml/reader.py b/scripts/clang-tidy/6.0.1/yaml/reader.py new file mode 100644 index 000000000..3249e6b9f --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/reader.py @@ -0,0 +1,190 @@ +# This module contains abstractions for the input stream. You don't have to +# looks further, there are no pretty code. +# +# We define two classes here. +# +# Mark(source, line, column) +# It's just a record and its only use is producing nice error messages. +# Parser does not use it for any other purposes. +# +# Reader(source, data) +# Reader determines the encoding of `data` and converts it to unicode. +# Reader provides the following methods and attributes: +# reader.peek(length=1) - return the next `length` characters +# reader.forward(length=1) - move the current position to `length` characters. +# reader.index - the number of the current character. +# reader.line, stream.column - the line and the column of the current character. 
+ +__all__ = ['Reader', 'ReaderError'] + +from error import YAMLError, Mark + +import codecs, re + +class ReaderError(YAMLError): + + def __init__(self, name, position, character, encoding, reason): + self.name = name + self.character = character + self.position = position + self.encoding = encoding + self.reason = reason + + def __str__(self): + if isinstance(self.character, str): + return "'%s' codec can't decode byte #x%02x: %s\n" \ + " in \"%s\", position %d" \ + % (self.encoding, ord(self.character), self.reason, + self.name, self.position) + else: + return "unacceptable character #x%04x: %s\n" \ + " in \"%s\", position %d" \ + % (self.character, self.reason, + self.name, self.position) + +class Reader(object): + # Reader: + # - determines the data encoding and converts it to unicode, + # - checks if characters are in allowed range, + # - adds '\0' to the end. + + # Reader accepts + # - a `str` object, + # - a `unicode` object, + # - a file-like object with its `read` method returning `str`, + # - a file-like object with its `read` method returning `unicode`. + + # Yeah, it's ugly and slow. 
+ + def __init__(self, stream): + self.name = None + self.stream = None + self.stream_pointer = 0 + self.eof = True + self.buffer = u'' + self.pointer = 0 + self.raw_buffer = None + self.raw_decode = None + self.encoding = None + self.index = 0 + self.line = 0 + self.column = 0 + if isinstance(stream, unicode): + self.name = "" + self.check_printable(stream) + self.buffer = stream+u'\0' + elif isinstance(stream, str): + self.name = "" + self.raw_buffer = stream + self.determine_encoding() + else: + self.stream = stream + self.name = getattr(stream, 'name', "") + self.eof = False + self.raw_buffer = '' + self.determine_encoding() + + def peek(self, index=0): + try: + return self.buffer[self.pointer+index] + except IndexError: + self.update(index+1) + return self.buffer[self.pointer+index] + + def prefix(self, length=1): + if self.pointer+length >= len(self.buffer): + self.update(length) + return self.buffer[self.pointer:self.pointer+length] + + def forward(self, length=1): + if self.pointer+length+1 >= len(self.buffer): + self.update(length+1) + while length: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch in u'\n\x85\u2028\u2029' \ + or (ch == u'\r' and self.buffer[self.pointer] != u'\n'): + self.line += 1 + self.column = 0 + elif ch != u'\uFEFF': + self.column += 1 + length -= 1 + + def get_mark(self): + if self.stream is None: + return Mark(self.name, self.index, self.line, self.column, + self.buffer, self.pointer) + else: + return Mark(self.name, self.index, self.line, self.column, + None, None) + + def determine_encoding(self): + while not self.eof and len(self.raw_buffer) < 2: + self.update_raw() + if not isinstance(self.raw_buffer, unicode): + if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): + self.raw_decode = codecs.utf_16_le_decode + self.encoding = 'utf-16-le' + elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): + self.raw_decode = codecs.utf_16_be_decode + self.encoding = 'utf-16-be' + else: + self.raw_decode = 
codecs.utf_8_decode + self.encoding = 'utf-8' + self.update(1) + + NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') + def check_printable(self, data): + match = self.NON_PRINTABLE.search(data) + if match: + character = match.group() + position = self.index+(len(self.buffer)-self.pointer)+match.start() + raise ReaderError(self.name, position, ord(character), + 'unicode', "special characters are not allowed") + + def update(self, length): + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer:] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, + 'strict', self.eof) + except UnicodeDecodeError, exc: + character = exc.object[exc.start] + if self.stream is not None: + position = self.stream_pointer-len(self.raw_buffer)+exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, + exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += u'\0' + self.raw_buffer = None + break + + def update_raw(self, size=1024): + data = self.stream.read(size) + if data: + self.raw_buffer += data + self.stream_pointer += len(data) + else: + self.eof = True + +#try: +# import psyco +# psyco.bind(Reader) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/6.0.1/yaml/representer.py b/scripts/clang-tidy/6.0.1/yaml/representer.py new file mode 100644 index 000000000..4ea8cb1fe --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/representer.py @@ -0,0 +1,486 @@ + +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError'] + +from error import * +from nodes import * + +import datetime + +import sys, copy_reg, types + +class RepresenterError(YAMLError): + pass + +class 
BaseRepresenter(object): + + yaml_representers = {} + yaml_multi_representers = {} + + def __init__(self, default_style=None, default_flow_style=None): + self.default_style = default_style + self.default_flow_style = default_flow_style + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent(self, data): + node = self.represent_data(data) + self.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def get_classobj_bases(self, cls): + bases = [cls] + for base in cls.__bases__: + bases.extend(self.get_classobj_bases(base)) + return bases + + def represent_data(self, data): + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + #if node is None: + # raise RepresenterError("recursive objects are not allowed: %r" % data) + return node + #self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if type(data) is types.InstanceType: + data_types = self.get_classobj_bases(data.__class__)+list(data_types) + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, unicode(data)) + #if alias_key is not None: + # self.represented_objects[alias_key] = node + return node + + def add_representer(cls, data_type, representer): + if not 'yaml_representers' in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + 
cls.yaml_representers[data_type] = representer + add_representer = classmethod(add_representer) + + def add_multi_representer(cls, data_type, representer): + if not 'yaml_multi_representers' in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + add_multi_representer = classmethod(add_multi_representer) + + def represent_scalar(self, tag, value, style=None): + if style is None: + style = self.default_style + node = ScalarNode(tag, value, style=style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + value = [] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + value = [] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = mapping.items() + mapping.sort() + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + 
else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + return False + +class SafeRepresenter(BaseRepresenter): + + def ignore_aliases(self, data): + if data is None: + return True + if isinstance(data, tuple) and data == (): + return True + if isinstance(data, (str, unicode, bool, int, float)): + return True + + def represent_none(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:null', + u'null') + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_bool(self, data): + if data: + value = u'true' + else: + value = u'false' + return self.represent_scalar(u'tag:yaml.org,2002:bool', value) + + def represent_int(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + def represent_long(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value*inf_value): + inf_value *= inf_value + + def represent_float(self, data): + if data != data or (data == 0.0 and data == 1.0): + value = u'.nan' + elif data == self.inf_value: + value = u'.inf' + elif data == -self.inf_value: + value = u'-.inf' + else: + value = unicode(repr(data)).lower() + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag. We fix this by adding + # '.0' before the 'e' symbol. + if u'.' 
not in value and u'e' in value: + value = value.replace(u'e', u'.0e', 1) + return self.represent_scalar(u'tag:yaml.org,2002:float', value) + + def represent_list(self, data): + #pairs = (len(data) > 0 and isinstance(data, list)) + #if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + #if not pairs: + return self.represent_sequence(u'tag:yaml.org,2002:seq', data) + #value = [] + #for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + #return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + return self.represent_mapping(u'tag:yaml.org,2002:map', data) + + def represent_set(self, data): + value = {} + for key in data: + value[key] = None + return self.represent_mapping(u'tag:yaml.org,2002:set', value) + + def represent_date(self, data): + value = unicode(data.isoformat()) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + value = unicode(data.isoformat(' ')) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + raise RepresenterError("cannot represent an object: %s" % data) + +SafeRepresenter.add_representer(type(None), + SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, + SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(unicode, + SafeRepresenter.represent_unicode) + +SafeRepresenter.add_representer(bool, + SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, + SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(long, + SafeRepresenter.represent_long) + 
+SafeRepresenter.add_representer(float, + SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, + SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, + SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, + SafeRepresenter.represent_undefined) + +class Representer(SafeRepresenter): + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:python/str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + tag = None + try: + data.encode('ascii') + tag = u'tag:yaml.org,2002:python/unicode' + except UnicodeEncodeError: + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data) + + def represent_long(self, data): + tag = u'tag:yaml.org,2002:int' + if int(data) is not data: + tag = u'tag:yaml.org,2002:python/long' + return self.represent_scalar(tag, unicode(data)) + + def represent_complex(self, data): + if data.imag == 0.0: + data = u'%r' % data.real + elif data.real == 0.0: + data = u'%rj' % data.imag + elif data.imag > 0: + data = u'%r+%rj' % (data.real, data.imag) + else: + data = u'%r%rj' % (data.real, data.imag) + return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) + + def represent_tuple(self, data): + return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + name = u'%s.%s' % 
(data.__module__, data.__name__) + return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'') + + def represent_module(self, data): + return self.represent_scalar( + u'tag:yaml.org,2002:python/module:'+data.__name__, u'') + + def represent_instance(self, data): + # For instances of classic classes, we use __getinitargs__ and + # __getstate__ to serialize the data. + + # If data.__getinitargs__ exists, the object must be reconstructed by + # calling cls(**args), where args is a tuple returned by + # __getinitargs__. Otherwise, the cls.__init__ method should never be + # called and the class instance is created by instantiating a trivial + # class and assigning to the instance's __class__ variable. + + # If data.__getstate__ exists, it returns the state of the object. + # Otherwise, the state of the object is data.__dict__. + + # We produce either a !!python/object or !!python/object/new node. + # If data.__getinitargs__ does not exist and state is a dictionary, we + # produce a !!python/object node . Otherwise we produce a + # !!python/object/new node. + + cls = data.__class__ + class_name = u'%s.%s' % (cls.__module__, cls.__name__) + args = None + state = None + if hasattr(data, '__getinitargs__'): + args = list(data.__getinitargs__()) + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__ + if args is None and isinstance(state, dict): + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+class_name, state) + if isinstance(state, dict) and not state: + return self.represent_sequence( + u'tag:yaml.org,2002:python/object/new:'+class_name, args) + value = {} + if args: + value['args'] = args + value['state'] = state + return self.represent_mapping( + u'tag:yaml.org,2002:python/object/new:'+class_name, value) + + def represent_object(self, data): + # We use __reduce__ API to save the data. 
data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. + + cls = type(data) + if cls in copy_reg.dispatch_table: + reduce = copy_reg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError("cannot represent object: %r" % data) + reduce = (list(reduce)+[None]*5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = u'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = u'tag:yaml.org,2002:python/object/apply:' + newobj = False + function_name = u'%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems \ + and isinstance(state, dict) and newobj: + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+function_name, state) + if not listitems and not dictitems \ + and isinstance(state, dict) and not state: + return self.represent_sequence(tag+function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag+function_name, value) 
+ +Representer.add_representer(str, + Representer.represent_str) + +Representer.add_representer(unicode, + Representer.represent_unicode) + +Representer.add_representer(long, + Representer.represent_long) + +Representer.add_representer(complex, + Representer.represent_complex) + +Representer.add_representer(tuple, + Representer.represent_tuple) + +Representer.add_representer(type, + Representer.represent_name) + +Representer.add_representer(types.ClassType, + Representer.represent_name) + +Representer.add_representer(types.FunctionType, + Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, + Representer.represent_name) + +Representer.add_representer(types.ModuleType, + Representer.represent_module) + +Representer.add_multi_representer(types.InstanceType, + Representer.represent_instance) + +Representer.add_multi_representer(object, + Representer.represent_object) + diff --git a/scripts/clang-tidy/6.0.1/yaml/resolver.py b/scripts/clang-tidy/6.0.1/yaml/resolver.py new file mode 100644 index 000000000..528fbc0ea --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/resolver.py @@ -0,0 +1,227 @@ + +__all__ = ['BaseResolver', 'Resolver'] + +from error import * +from nodes import * + +import re + +class ResolverError(YAMLError): + pass + +class BaseResolver(object): + + DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} + yaml_path_resolvers = {} + + def __init__(self): + self.resolver_exact_paths = [] + self.resolver_prefix_paths = [] + + def add_implicit_resolver(cls, tag, regexp, first): + if not 'yaml_implicit_resolvers' in cls.__dict__: + implicit_resolvers = {} + for key in cls.yaml_implicit_resolvers: + implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:] + cls.yaml_implicit_resolvers = implicit_resolvers + if first is None: + first = [None] + for ch in first: + 
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + add_implicit_resolver = classmethod(add_implicit_resolver) + + def add_path_resolver(cls, tag, path, kind=None): + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. 
+ if not 'yaml_path_resolvers' in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError("Invalid path element: %s" % element) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ + and not isinstance(node_check, basestring) \ + and node_check is not None: + raise ResolverError("Invalid node checker: %s" % node_check) + if not isinstance(index_check, (basestring, int)) \ + and index_check is not None: + raise ResolverError("Invalid index checker: %s" % index_check) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] \ + and kind is not None: + raise ResolverError("Invalid node kind: %s" % kind) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + add_path_resolver = classmethod(add_path_resolver) + + def descend_resolver(self, current_node, current_index): + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, + current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + 
prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, + current_node, current_index): + node_check, index_check = path[depth-1] + if isinstance(node_check, basestring): + if current_node.tag != node_check: + return + elif node_check is not None: + if not isinstance(current_node, node_check): + return + if index_check is True and current_index is not None: + return + if (index_check is False or index_check is None) \ + and current_index is None: + return + if isinstance(index_check, basestring): + if not (isinstance(current_index, ScalarNode) + and index_check == current_index.value): + return + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return + return True + + def resolve(self, kind, value, implicit): + if kind is ScalarNode and implicit[0]: + if value == u'': + resolvers = self.yaml_implicit_resolvers.get(u'', []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if self.yaml_path_resolvers: + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + +class Resolver(BaseResolver): + pass + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:bool', + re.compile(ur'''^(?:yes|Yes|YES|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + 
list(u'yYnNtTfFoO')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:float', + re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? + |\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:int', + re.compile(ur'''^(?:[-+]?0b[0-1_]+ + |[-+]?0[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), + list(u'-+0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:merge', + re.compile(ur'^(?:<<)$'), + [u'<']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:null', + re.compile(ur'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + [u'~', u'n', u'N', u'']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:timestamp', + re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? + (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list(u'0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:value', + re.compile(ur'^(?:=)$'), + [u'=']) + +# The following resolver is only for documentation purposes. It cannot work +# because plain scalars cannot start with '!', '&', or '*'. 
+Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:yaml', + re.compile(ur'^(?:!|&|\*)$'), + list(u'!&*')) + diff --git a/scripts/clang-tidy/6.0.1/yaml/scanner.py b/scripts/clang-tidy/6.0.1/yaml/scanner.py new file mode 100644 index 000000000..834f662a4 --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/scanner.py @@ -0,0 +1,1453 @@ + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# Read comments in the Scanner code for more details. +# + +__all__ = ['Scanner', 'ScannerError'] + +from error import MarkedYAMLError +from tokens import * + +class ScannerError(MarkedYAMLError): + pass + +class SimpleKey(object): + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + +class Scanner(object): + + def __init__(self): + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer. + + # Had we reached the end of the stream? + self.done = False + + # The number of unclosed '{' and '['. `flow_level == 0` means block + # context. + self.flow_level = 0 + + # List of processed tokens that are not yet emitted. 
+ self.tokens = [] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. + self.possible_simple_keys = {} + + # Public methods. + + def check_token(self, *choices): + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # Return the next token, but do not delete if from the queue. 
+ while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + return self.tokens[0] + + def get_token(self): + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. + + def need_more_tokens(self): + if self.done: + return False + if not self.tokens: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + + def fetch_more_tokens(self): + + # Eat whitespaces and comments until we reach the next token. + self.scan_to_next_token() + + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.column) + + # Peek the next character. + ch = self.peek() + + # Is it the end of stream? + if ch == u'\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == u'%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == u'-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == u'.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + #if ch == u'\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == u'[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == u'{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == u']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? 
+ if ch == u'}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == u',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == u'-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == u'?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == u':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == u'*': + return self.fetch_alias() + + # Is it an anchor? + if ch == u'&': + return self.fetch_anchor() + + # Is it a tag? + if ch == u'!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == u'|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == u'>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == u'\'': + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == u'\"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError("while scanning for the next token", None, + "found character %r that cannot start any token" + % ch.encode('utf-8'), self.get_mark()) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. 
We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). + for level in self.possible_simple_keys.keys(): + key = self.possible_simple_keys[level] + if key.line != self.line \ + or self.index-key.index > 1024: + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # The next token may start a simple key. We check if it's possible + # and save its position. This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.column + + # The next token might be a simple key. Let's save it's number and + # position. + if self.allow_simple_key: + self.remove_possible_simple_key() + token_number = self.tokens_taken+len(self.tokens) + key = SimpleKey(token_number, required, + self.index, self.line, self.column, self.get_mark()) + self.possible_simple_keys[self.flow_level] = key + + def remove_possible_simple_key(self): + # Remove the saved possible key position at the current flow level. 
+ if self.flow_level in self.possible_simple_keys: + key = self.possible_simple_keys[self.flow_level] + + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + + del self.possible_simple_keys[self.flow_level] + + # Indentation functions. + + def unwind_indent(self, column): + + ## In flow context, tokens should respect indentation. + ## Actually the condition should be `self.indent >= column` according to + ## the spec. But this condition will prohibit intuitively correct + ## constructions such as + ## key : { + ## } + #if self.flow_level and self.indent > column: + # raise ScannerError(None, None, + # "invalid intendation or unclosed '[' or '{'", + # self.get_mark()) + + # In the flow context, indentation is ignored. We make the scanner less + # restrictive then specification requires. + if self.flow_level: + return + + # In block context, we may need to issue the BLOCK-END tokens. + while self.indent > column: + mark = self.get_mark() + self.indent = self.indents.pop() + self.tokens.append(BlockEndToken(mark, mark)) + + def add_indent(self, column): + # Check if we need to increase indentation. + if self.indent < column: + self.indents.append(self.indent) + self.indent = column + return True + return False + + # Fetchers. + + def fetch_stream_start(self): + # We always add STREAM-START as the first token and STREAM-END as the + # last token. + + # Read the token. + mark = self.get_mark() + + # Add STREAM-START. + self.tokens.append(StreamStartToken(mark, mark, + encoding=self.encoding)) + + + def fetch_stream_end(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + self.possible_simple_keys = {} + + # Read the token. + mark = self.get_mark() + + # Add STREAM-END. + self.tokens.append(StreamEndToken(mark, mark)) + + # The steam is finished. 
+ self.done = True + + def fetch_directive(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Scan and add DIRECTIVE. + self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END. + start_mark = self.get_mark() + self.forward(3) + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_start(self): + self.fetch_flow_collection_start(FlowSequenceStartToken) + + def fetch_flow_mapping_start(self): + self.fetch_flow_collection_start(FlowMappingStartToken) + + def fetch_flow_collection_start(self, TokenClass): + + # '[' and '{' may start a simple key. + self.save_possible_simple_key() + + # Increase the flow level. + self.flow_level += 1 + + # Simple keys are allowed after '[' and '{'. + self.allow_simple_key = True + + # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_end(self): + self.fetch_flow_collection_end(FlowSequenceEndToken) + + def fetch_flow_mapping_end(self): + self.fetch_flow_collection_end(FlowMappingEndToken) + + def fetch_flow_collection_end(self, TokenClass): + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Decrease the flow level. + self.flow_level -= 1 + + # No simple keys after ']' or '}'. 
+ self.allow_simple_key = False + + # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_entry(self): + + # Simple keys are allowed after ','. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add FLOW-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError(None, None, + "sequence entries are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-SEQUENCE-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + + # Simple keys are allowed after '-'. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not nessesary a simple)? + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping keys are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-MAPPING-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. 
+ self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert(key.token_number-self.tokens_taken, + KeyToken(key.mark, key.mark)) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert(key.token_number-self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark)) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be catched by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping values are not allowed here", + self.get_mark()) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. 
+ start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + + # ALIAS could be a simple key. + self.save_possible_simple_key() + + # No simple keys after ALIAS. + self.allow_simple_key = False + + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + + # ANCHOR could start a simple key. + self.save_possible_simple_key() + + # No simple keys after ANCHOR. + self.allow_simple_key = False + + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + + # TAG could start a simple key. + self.save_possible_simple_key() + + # No simple keys after TAG. + self.allow_simple_key = False + + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + + # A simple key may follow a block scalar. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + self.fetch_flow_scalar(style='\'') + + def fetch_double(self): + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + + # A flow scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after flow scalars. + self.allow_simple_key = False + + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + + # A plain scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + + # Scan and add SCALAR. 
May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.column == 0: + return True + + def check_document_start(self): + + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'---' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_document_end(self): + + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'...' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_block_entry(self): + + # BLOCK-ENTRY: '-' (' '|'\n') + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_key(self): + + # KEY(flow context): '?' + if self.flow_level: + return True + + # KEY(block context): '?' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_value(self): + + # VALUE(flow context): ':' + if self.flow_level: + return True + + # VALUE(block context): ':' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_plain(self): + + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. + # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + ch = self.peek() + return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ + or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029' + and (ch == u'-' or (not self.flow_level and ch in u'?:'))) + + # Scanners. + + def scan_to_next_token(self): + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. 
+ # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + + if self.index == 0 and self.peek() == u'\uFEFF': + self.forward() + found = False + while not found: + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + + def scan_directive(self): + # See the specification for details. + start_mark = self.get_mark() + self.forward() + name = self.scan_directive_name(start_mark) + value = None + if name == u'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.get_mark() + elif name == u'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.get_mark() + else: + end_mark = self.get_mark() + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # See the specification for details. 
+ length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return value + + def scan_yaml_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + major = self.scan_yaml_directive_number(start_mark) + if self.peek() != '.': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or '.', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + minor = self.scan_yaml_directive_number(start_mark) + if self.peek() not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or ' ', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + return (major, minor) + + def scan_yaml_directive_number(self, start_mark): + # See the specification for details. + ch = self.peek() + if not (u'0' <= ch <= u'9'): + raise ScannerError("while scanning a directive", start_mark, + "expected a digit, but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 0 + while u'0' <= self.peek(length) <= u'9': + length += 1 + value = int(self.prefix(length)) + self.forward(length) + return value + + def scan_tag_directive_value(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + handle = self.scan_tag_directive_handle(start_mark) + while self.peek() == u' ': + self.forward() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.peek() + if ch != u' ': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_tag_directive_prefix(self, start_mark): + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_directive_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpteted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. 
+ start_mark = self.get_mark() + indicator = self.peek() + if indicator == u'*': + name = 'alias' + else: + name = 'anchor' + self.forward() + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`': + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + end_mark = self.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # See the specification for details. + start_mark = self.get_mark() + ch = self.peek(1) + if ch == u'<': + handle = None + self.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if self.peek() != u'>': + raise ScannerError("while parsing a tag", start_mark, + "expected '>', but found %r" % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + elif ch in u'\0 \t\r\n\x85\u2028\u2029': + handle = None + suffix = u'!' + self.forward() + else: + length = 1 + use_handle = False + while ch not in u'\0 \r\n\x85\u2028\u2029': + if ch == u'!': + use_handle = True + break + length += 1 + ch = self.peek(length) + handle = u'!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = u'!' 
+ self.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a tag", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + value = (handle, suffix) + end_mark = self.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style): + # See the specification for details. + + if style == '>': + folded = True + else: + folded = False + + chunks = [] + start_mark = self.get_mark() + + # Scan the header. + self.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. + min_indent = self.indent+1 + if min_indent < 1: + min_indent = 1 + if increment is None: + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + indent = min_indent+increment-1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = u'' + + # Scan the inner part of the block scalar. + while self.column == indent and self.peek() != u'\0': + chunks.extend(breaks) + leading_non_space = self.peek() not in u' \t' + length = 0 + while self.peek(length) not in u'\0\r\n\x85\u2028\u2029': + length += 1 + chunks.append(self.prefix(length)) + self.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if self.column == indent and self.peek() != u'\0': + + # Unfortunately, folding rules are ambiguous. 
+ # + # This is the folding according to the specification: + + if folded and line_break == u'\n' \ + and leading_non_space and self.peek() not in u' \t': + if not breaks: + chunks.append(u' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + #if folded and line_break == u'\n': + # if not breaks: + # if self.peek() not in ' \t': + # chunks.append(u' ') + # else: + # chunks.append(line_break) + #else: + # chunks.append(line_break) + else: + break + + # Chomp the tail. + if chomping is not False: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + + # We are done. + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + def scan_block_scalar_indicators(self, start_mark): + # See the specification for details. + chomping = None + increment = None + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + elif ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected chomping or indentation indicators, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_block_scalar_indentation(self): + # See the specification for details. + chunks = [] + max_indent = 0 + end_mark = self.get_mark() + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() != u' ': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + else: + self.forward() + if self.column > max_indent: + max_indent = self.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # See the specification for details. + chunks = [] + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + while self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + return chunks, end_mark + + def scan_flow_scalar(self, style): + # See the specification for details. + # Note that we loose indentation rules for quoted scalars. Quoted + # scalars don't need to adhere indentation because " and ' clearly + # mark the beginning and the end of them. Therefore we are less + # restrictive then the specification requires. We only need to check + # that document separators are not included in scalars. 
+ if style == '"': + double = True + else: + double = False + chunks = [] + start_mark = self.get_mark() + quote = self.peek() + self.forward() + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + while self.peek() != quote: + chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + self.forward() + end_mark = self.get_mark() + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + ESCAPE_REPLACEMENTS = { + u'0': u'\0', + u'a': u'\x07', + u'b': u'\x08', + u't': u'\x09', + u'\t': u'\x09', + u'n': u'\x0A', + u'v': u'\x0B', + u'f': u'\x0C', + u'r': u'\x0D', + u'e': u'\x1B', + u' ': u'\x20', + u'\"': u'\"', + u'\\': u'\\', + u'N': u'\x85', + u'_': u'\xA0', + u'L': u'\u2028', + u'P': u'\u2029', + } + + ESCAPE_CODES = { + u'x': 2, + u'u': 4, + u'U': 8, + } + + def scan_flow_scalar_non_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + length = 0 + while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029': + length += 1 + if length: + chunks.append(self.prefix(length)) + self.forward(length) + ch = self.peek() + if not double and ch == u'\'' and self.peek(1) == u'\'': + chunks.append(u'\'') + self.forward(2) + elif (double and ch == u'\'') or (not double and ch in u'\"\\'): + chunks.append(ch) + self.forward() + elif double and ch == u'\\': + self.forward() + ch = self.peek() + if ch in self.ESCAPE_REPLACEMENTS: + chunks.append(self.ESCAPE_REPLACEMENTS[ch]) + self.forward() + elif ch in self.ESCAPE_CODES: + length = self.ESCAPE_CODES[ch] + self.forward() + for k in range(length): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "expected escape sequence of %d hexdecimal numbers, but found %r" % + (length, self.peek(k).encode('utf-8')), self.get_mark()) + code = int(self.prefix(length), 16) + 
chunks.append(unichr(code)) + self.forward(length) + elif ch in u'\r\n\x85\u2028\u2029': + self.scan_line_break() + chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) + else: + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark()) + else: + return chunks + + def scan_flow_scalar_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + length = 0 + while self.peek(length) in u' \t': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch == u'\0': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected end of stream", self.get_mark()) + elif ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + breaks = self.scan_flow_scalar_breaks(double, start_mark) + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + else: + chunks.append(whitespaces) + return chunks + + def scan_flow_scalar_breaks(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + # Instead of checking indentation, we check for document + # separators. + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected document separator", self.get_mark()) + while self.peek() in u' \t': + self.forward() + if self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + else: + return chunks + + def scan_plain(self): + # See the specification for details. + # We add an additional restriction for the flow context: + # plain scalars in the flow context cannot contain ',', ':' and '?'. + # We also keep track of the `allow_simple_key` flag here. + # Indentation rules are loosed for the flow context. 
+ chunks = [] + start_mark = self.get_mark() + end_mark = start_mark + indent = self.indent+1 + # We allow zero indentation for scalars, but then we need to check for + # document separators at the beginning of the line. + #if indent == 0: + # indent = 1 + spaces = [] + while True: + length = 0 + if self.peek() == u'#': + break + while True: + ch = self.peek(length) + if ch in u'\0 \t\r\n\x85\u2028\u2029' \ + or (not self.flow_level and ch == u':' and + self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \ + or (self.flow_level and ch in u',:?[]{}'): + break + length += 1 + # It's not clear what we should do with ':' in the flow context. + if (self.flow_level and ch == u':' + and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'): + self.forward(length) + raise ScannerError("while scanning a plain scalar", start_mark, + "found unexpected ':'", self.get_mark(), + "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.") + if length == 0: + break + self.allow_simple_key = False + chunks.extend(spaces) + chunks.append(self.prefix(length)) + self.forward(length) + end_mark = self.get_mark() + spaces = self.scan_plain_spaces(indent, start_mark) + if not spaces or self.peek() == u'#' \ + or (not self.flow_level and self.column < indent): + break + return ScalarToken(u''.join(chunks), True, start_mark, end_mark) + + def scan_plain_spaces(self, indent, start_mark): + # See the specification for details. + # The specification is really confusing about tabs in plain scalars. + # We just forbid them completely. Do not use tabs in YAML! 
+ chunks = [] + length = 0 + while self.peek(length) in u' ': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + self.allow_simple_key = True + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + breaks = [] + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() == ' ': + self.forward() + else: + breaks.append(self.scan_line_break()) + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + elif whitespaces: + chunks.append(whitespaces) + return chunks + + def scan_tag_handle(self, name, start_mark): + # See the specification for details. + # For some strange reasons, the specification does not allow '_' in + # tag handles. I have allowed it anyway. + ch = self.peek() + if ch != u'!': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 1 + ch = self.peek(length) + if ch != u' ': + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if ch != u'!': + self.forward(length) + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length += 1 + value = self.prefix(length) + self.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # See the specification for details. + # Note: we do not check if URI is well-formed. 
+ chunks = [] + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.!~*\'()[]%': + if ch == u'%': + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = self.peek(length) + if length: + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + if not chunks: + raise ScannerError("while parsing a %s" % name, start_mark, + "expected URI, but found %r" % ch.encode('utf-8'), + self.get_mark()) + return u''.join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # See the specification for details. + bytes = [] + mark = self.get_mark() + while self.peek() == u'%': + self.forward() + for k in range(2): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected URI escape sequence of 2 hexdecimal numbers, but found %r" % + (self.peek(k).encode('utf-8')), self.get_mark()) + bytes.append(chr(int(self.prefix(2), 16))) + self.forward(2) + try: + value = unicode(''.join(bytes), 'utf-8') + except UnicodeDecodeError, exc: + raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) + return value + + def scan_line_break(self): + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.peek() + if ch in u'\r\n\x85': + if self.prefix(2) == u'\r\n': + self.forward(2) + else: + self.forward() + return u'\n' + elif ch in u'\u2028\u2029': + self.forward() + return ch + return u'' + +#try: +# import psyco +# psyco.bind(Scanner) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/6.0.1/yaml/serializer.py b/scripts/clang-tidy/6.0.1/yaml/serializer.py new file mode 100644 index 000000000..0bf1e96dc --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/serializer.py @@ -0,0 +1,111 @@ + 
+__all__ = ['Serializer', 'SerializerError'] + +from error import YAMLError +from events import * +from nodes import * + +class SerializerError(YAMLError): + pass + +class Serializer(object): + + ANCHOR_TEMPLATE = u'id%03d' + + def __init__(self, encoding=None, + explicit_start=None, explicit_end=None, version=None, tags=None): + self.use_encoding = encoding + self.use_explicit_start = explicit_start + self.use_explicit_end = explicit_end + self.use_version = version + self.use_tags = tags + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + self.closed = None + + def open(self): + if self.closed is None: + self.emit(StreamStartEvent(encoding=self.use_encoding)) + self.closed = False + elif self.closed: + raise SerializerError("serializer is closed") + else: + raise SerializerError("serializer is already opened") + + def close(self): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif not self.closed: + self.emit(StreamEndEvent()) + self.closed = True + + #def __del__(self): + # self.close() + + def serialize(self, node): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif self.closed: + raise SerializerError("serializer is closed") + self.emit(DocumentStartEvent(explicit=self.use_explicit_start, + version=self.use_version, tags=self.use_tags)) + self.anchor_node(node) + self.serialize_node(node, None, None) + self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + + def anchor_node(self, node): + if node in self.anchors: + if self.anchors[node] is None: + self.anchors[node] = self.generate_anchor(node) + else: + self.anchors[node] = None + if isinstance(node, SequenceNode): + for item in node.value: + self.anchor_node(item) + elif isinstance(node, MappingNode): + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) + + def generate_anchor(self, node): + 
self.last_anchor_id += 1 + return self.ANCHOR_TEMPLATE % self.last_anchor_id + + def serialize_node(self, node, parent, index): + alias = self.anchors[node] + if node in self.serialized_nodes: + self.emit(AliasEvent(alias)) + else: + self.serialized_nodes[node] = True + self.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + detected_tag = self.resolve(ScalarNode, node.value, (True, False)) + default_tag = self.resolve(ScalarNode, node.value, (False, True)) + implicit = (node.tag == detected_tag), (node.tag == default_tag) + self.emit(ScalarEvent(alias, node.tag, implicit, node.value, + style=node.style)) + elif isinstance(node, SequenceNode): + implicit = (node.tag + == self.resolve(SequenceNode, node.value, True)) + self.emit(SequenceStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emit(SequenceEndEvent()) + elif isinstance(node, MappingNode): + implicit = (node.tag + == self.resolve(MappingNode, node.value, True)) + self.emit(MappingStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + self.emit(MappingEndEvent()) + self.ascend_resolver() + diff --git a/scripts/clang-tidy/6.0.1/yaml/tokens.py b/scripts/clang-tidy/6.0.1/yaml/tokens.py new file mode 100644 index 000000000..4d0b48a39 --- /dev/null +++ b/scripts/clang-tidy/6.0.1/yaml/tokens.py @@ -0,0 +1,104 @@ + +class Token(object): + def __init__(self, start_mark, end_mark): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in self.__dict__ + if not key.endswith('_mark')] + attributes.sort() + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +#class BOMToken(Token): +# id = '' + +class 
DirectiveToken(Token): + id = '' + def __init__(self, name, value, start_mark, end_mark): + self.name = name + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class DocumentStartToken(Token): + id = '' + +class DocumentEndToken(Token): + id = '' + +class StreamStartToken(Token): + id = '' + def __init__(self, start_mark=None, end_mark=None, + encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndToken(Token): + id = '' + +class BlockSequenceStartToken(Token): + id = '' + +class BlockMappingStartToken(Token): + id = '' + +class BlockEndToken(Token): + id = '' + +class FlowSequenceStartToken(Token): + id = '[' + +class FlowMappingStartToken(Token): + id = '{' + +class FlowSequenceEndToken(Token): + id = ']' + +class FlowMappingEndToken(Token): + id = '}' + +class KeyToken(Token): + id = '?' + +class ValueToken(Token): + id = ':' + +class BlockEntryToken(Token): + id = '-' + +class FlowEntryToken(Token): + id = ',' + +class AliasToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class AnchorToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class TagToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class ScalarToken(Token): + id = '' + def __init__(self, value, plain, start_mark, end_mark, style=None): + self.value = value + self.plain = plain + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + diff --git a/scripts/clang-tidy/7.0.0/README-yaml.md b/scripts/clang-tidy/7.0.0/README-yaml.md new file mode 100644 index 000000000..2cc738ab7 --- /dev/null +++ b/scripts/clang-tidy/7.0.0/README-yaml.md @@ -0,0 +1,13 @@ +This is a copy of 
`pyyaml-3.12` vendored on april 24, 2018 by @springmeyer. + +https://github.com/mapbox/mason/issues/563 documents why. + +The process to vendor was: + +``` +cd mason +pip install pyyaml --user +cp $(python -m site --user-site)/yaml scripts/clang-tidy/6.0.0/ +``` + +Then the `clang-tidy` package was built and the `yaml` directory was copied beside the `share/run-clang-tidy.py` script (which depends on it). \ No newline at end of file diff --git a/scripts/clang-tidy/7.0.0/script.sh b/scripts/clang-tidy/7.0.0/script.sh index 391259e3c..c21f18d8a 100755 --- a/scripts/clang-tidy/7.0.0/script.sh +++ b/scripts/clang-tidy/7.0.0/script.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + # dynamically determine the path to this package HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" diff --git a/scripts/clang-tidy/7.0.0/yaml/__init__.py b/scripts/clang-tidy/7.0.0/yaml/__init__.py new file mode 100644 index 000000000..87c15d38a --- /dev/null +++ b/scripts/clang-tidy/7.0.0/yaml/__init__.py @@ -0,0 +1,315 @@ + +from error import * + +from tokens import * +from events import * +from nodes import * + +from loader import * +from dumper import * + +__version__ = '3.12' + +try: + from cyaml import * + __with_libyaml__ = True +except ImportError: + __with_libyaml__ = False + +def scan(stream, Loader=Loader): + """ + Scan a YAML stream and produce scanning tokens. + """ + loader = Loader(stream) + try: + while loader.check_token(): + yield loader.get_token() + finally: + loader.dispose() + +def parse(stream, Loader=Loader): + """ + Parse a YAML stream and produce parsing events. + """ + loader = Loader(stream) + try: + while loader.check_event(): + yield loader.get_event() + finally: + loader.dispose() + +def compose(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. 
+ """ + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + +def compose_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + loader = Loader(stream) + try: + while loader.check_node(): + yield loader.get_node() + finally: + loader.dispose() + +def load(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + loader = Loader(stream) + try: + return loader.get_single_data() + finally: + loader.dispose() + +def load_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + """ + loader = Loader(stream) + try: + while loader.check_data(): + yield loader.get_data() + finally: + loader.dispose() + +def safe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + return load(stream, SafeLoader) + +def safe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + return load_all(stream, SafeLoader) + +def emit(events, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. 
+ """ + getvalue = None + if stream is None: + from StringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + try: + for event in events: + dumper.emit(event) + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize_all(nodes, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for node in nodes: + dumper.serialize(node) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + +def dump_all(documents, stream=None, Dumper=Dumper, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for data in documents: + dumper.represent(data) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def dump(data, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a Python object into a YAML stream. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=Dumper, **kwds) + +def safe_dump_all(documents, stream=None, **kwds): + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + +def safe_dump(data, stream=None, **kwds): + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + +def add_implicit_resolver(tag, regexp, first=None, + Loader=Loader, Dumper=Dumper): + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + Loader.add_implicit_resolver(tag, regexp, first) + Dumper.add_implicit_resolver(tag, regexp, first) + +def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): + """ + Add a path based resolver for the given tag. 
+ A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. + """ + Loader.add_path_resolver(tag, path, kind) + Dumper.add_path_resolver(tag, path, kind) + +def add_constructor(tag, constructor, Loader=Loader): + """ + Add a constructor for the given tag. + Constructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. + """ + Loader.add_constructor(tag, constructor) + +def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. + Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. + """ + Loader.add_multi_constructor(tag_prefix, multi_constructor) + +def add_representer(data_type, representer, Dumper=Dumper): + """ + Add a representer for the given type. + Representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. + """ + Dumper.add_representer(data_type, representer) + +def add_multi_representer(data_type, multi_representer, Dumper=Dumper): + """ + Add a representer for the given type. + Multi-representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. + """ + Dumper.add_multi_representer(data_type, multi_representer) + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. 
+ """ + def __init__(cls, name, bases, kwds): + super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + cls.yaml_dumper.add_representer(cls, cls.to_yaml) + +class YAMLObject(object): + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. + """ + + __metaclass__ = YAMLObjectMetaclass + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_loader = Loader + yaml_dumper = Dumper + + yaml_tag = None + yaml_flow_style = None + + def from_yaml(cls, loader, node): + """ + Convert a representation node to a Python object. + """ + return loader.construct_yaml_object(node, cls) + from_yaml = classmethod(from_yaml) + + def to_yaml(cls, dumper, data): + """ + Convert a Python object to a representation node. + """ + return dumper.represent_yaml_object(cls.yaml_tag, data, cls, + flow_style=cls.yaml_flow_style) + to_yaml = classmethod(to_yaml) + diff --git a/scripts/clang-tidy/7.0.0/yaml/composer.py b/scripts/clang-tidy/7.0.0/yaml/composer.py new file mode 100644 index 000000000..06e5ac782 --- /dev/null +++ b/scripts/clang-tidy/7.0.0/yaml/composer.py @@ -0,0 +1,139 @@ + +__all__ = ['Composer', 'ComposerError'] + +from error import MarkedYAMLError +from events import * +from nodes import * + +class ComposerError(MarkedYAMLError): + pass + +class Composer(object): + + def __init__(self): + self.anchors = {} + + def check_node(self): + # Drop the STREAM-START event. + if self.check_event(StreamStartEvent): + self.get_event() + + # If there are more documents available? + return not self.check_event(StreamEndEvent) + + def get_node(self): + # Get the root node of the next document. + if not self.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # Drop the STREAM-START event. + self.get_event() + + # Compose a document if the stream is not empty. 
+ document = None + if not self.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.check_event(StreamEndEvent): + event = self.get_event() + raise ComposerError("expected a single document in the stream", + document.start_mark, "but found another document", + event.start_mark) + + # Drop the STREAM-END event. + self.get_event() + + return document + + def compose_document(self): + # Drop the DOCUMENT-START event. + self.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. + self.get_event() + + self.anchors = {} + return node + + def compose_node(self, parent, index): + if self.check_event(AliasEvent): + event = self.get_event() + anchor = event.anchor + if anchor not in self.anchors: + raise ComposerError(None, None, "found undefined alias %r" + % anchor.encode('utf-8'), event.start_mark) + return self.anchors[anchor] + event = self.peek_event() + anchor = event.anchor + if anchor is not None: + if anchor in self.anchors: + raise ComposerError("found duplicate anchor %r; first occurence" + % anchor.encode('utf-8'), self.anchors[anchor].start_mark, + "second occurence", event.start_mark) + self.descend_resolver(parent, index) + if self.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + event = self.get_event() + tag = event.tag + if tag is None or tag == u'!': + tag = self.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode(tag, event.value, + event.start_mark, event.end_mark, style=event.style) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + start_event = 
self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + + def compose_mapping_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(MappingNode, None, start_event.implicit) + node = MappingNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + while not self.check_event(MappingEndEvent): + #key_event = self.peek_event() + item_key = self.compose_node(node, None) + #if item_key in node.value: + # raise ComposerError("while composing a mapping", start_event.start_mark, + # "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + #node.value[item_key] = item_value + node.value.append((item_key, item_value)) + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + diff --git a/scripts/clang-tidy/7.0.0/yaml/constructor.py b/scripts/clang-tidy/7.0.0/yaml/constructor.py new file mode 100644 index 000000000..635faac3e --- /dev/null +++ b/scripts/clang-tidy/7.0.0/yaml/constructor.py @@ -0,0 +1,675 @@ + +__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', + 'ConstructorError'] + +from error import * +from nodes import * + +import datetime + +import binascii, re, sys, types + +class ConstructorError(MarkedYAMLError): + pass + +class BaseConstructor(object): + + yaml_constructors = {} + yaml_multi_constructors = {} + + def __init__(self): + self.constructed_objects = {} + self.recursive_objects = {} + 
self.state_generators = [] + self.deep_construct = False + + def check_data(self): + # If there are more documents available? + return self.check_node() + + def get_data(self): + # Construct and return the next document. + if self.check_node(): + return self.construct_document(self.get_node()) + + def get_single_data(self): + # Ensure that the stream contains a single document and construct it. + node = self.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + data = self.construct_object(node) + while self.state_generators: + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + raise ConstructorError(None, None, + "found unconstructable recursive node", node.start_mark) + self.recursive_objects[node] = None + constructor = None + tag_suffix = None + if node.tag in self.yaml_constructors: + constructor = self.yaml_constructors[node.tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if node.tag.startswith(tag_prefix): + tag_suffix = node.tag[len(tag_prefix):] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = node.tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + 
constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = generator.next() + if self.deep_construct: + for dummy in generator: + pass + else: + self.state_generators.append(generator) + self.constructed_objects[node] = data + del self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_scalar(self, node): + if not isinstance(node, ScalarNode): + raise ConstructorError(None, None, + "expected a scalar node, but found %s" % node.id, + node.start_mark) + return node.value + + def construct_sequence(self, node, deep=False): + if not isinstance(node, SequenceNode): + raise ConstructorError(None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + return [self.construct_object(child, deep=deep) + for child in node.value] + + def construct_mapping(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except TypeError, exc: + raise ConstructorError("while constructing a mapping", node.start_mark, + "found unacceptable key (%s)" % exc, key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + def construct_pairs(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + def add_constructor(cls, tag, 
constructor): + if not 'yaml_constructors' in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + add_constructor = classmethod(add_constructor) + + def add_multi_constructor(cls, tag_prefix, multi_constructor): + if not 'yaml_multi_constructors' in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + cls.yaml_multi_constructors[tag_prefix] = multi_constructor + add_multi_constructor = classmethod(add_multi_constructor) + +class SafeConstructor(BaseConstructor): + + def construct_scalar(self, node): + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + if 
isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + + def construct_yaml_null(self, node): + self.construct_scalar(node) + return None + + bool_values = { + u'yes': True, + u'no': False, + u'true': True, + u'false': False, + u'on': True, + u'off': False, + } + + def construct_yaml_bool(self, node): + value = self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '') + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '0': + return 0 + elif value.startswith('0b'): + return sign*int(value[2:], 2) + elif value.startswith('0x'): + return sign*int(value[2:], 16) + elif value[0] == '0': + return sign*int(value, 8) + elif ':' in value: + digits = [int(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*int(value) + + inf_value = 1e300 + while inf_value != inf_value*inf_value: + inf_value *= inf_value + nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). 
+ + def construct_yaml_float(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '').lower() + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '.inf': + return sign*self.inf_value + elif value == '.nan': + return self.nan_value + elif ':' in value: + digits = [float(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*float(value) + + def construct_yaml_binary(self, node): + value = self.construct_scalar(node) + try: + return str(value).decode('base64') + except (binascii.Error, UnicodeEncodeError), exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + timestamp_regexp = re.compile( + ur'''^(?P[0-9][0-9][0-9][0-9]) + -(?P[0-9][0-9]?) + -(?P[0-9][0-9]?) + (?:(?:[Tt]|[ \t]+) + (?P[0-9][0-9]?) + :(?P[0-9][0-9]) + :(?P[0-9][0-9]) + (?:\.(?P[0-9]*))? + (?:[ \t]*(?PZ|(?P[-+])(?P[0-9][0-9]?) 
+ (?::(?P[0-9][0-9]))?))?)?$''', re.X) + + def construct_yaml_timestamp(self, node): + value = self.construct_scalar(node) + match = self.timestamp_regexp.match(node.value) + values = match.groupdict() + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + if not values['hour']: + return datetime.date(year, month, day) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction = values['fraction'][:6] + while len(fraction) < 6: + fraction += '0' + fraction = int(fraction) + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + tz_minute = int(values['tz_minute'] or 0) + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data + + def construct_yaml_omap(self, node): + # Note: we do not check for duplicate keys, because it's too + # CPU-expensive. + omap = [] + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + omap.append((key, value)) + + def construct_yaml_pairs(self, node): + # Note: the same code as `construct_yaml_omap`. 
+ pairs = [] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + data = set() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + value = self.construct_scalar(node) + try: + return value.encode('ascii') + except UnicodeEncodeError: + return value + + def construct_yaml_seq(self, node): + data = [] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + data = {} + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + raise ConstructorError(None, None, + "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'), + node.start_mark) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:null', + SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:bool', + SafeConstructor.construct_yaml_bool) + 
+SafeConstructor.add_constructor( + u'tag:yaml.org,2002:int', + SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:float', + SafeConstructor.construct_yaml_float) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:binary', + SafeConstructor.construct_yaml_binary) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:omap', + SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', + SafeConstructor.construct_yaml_pairs) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:set', + SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:str', + SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:seq', + SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:map', + SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, + SafeConstructor.construct_undefined) + +class Constructor(SafeConstructor): + + def construct_python_str(self, node): + return self.construct_scalar(node).encode('utf-8') + + def construct_python_unicode(self, node): + return self.construct_scalar(node) + + def construct_python_long(self, node): + return long(self.construct_yaml_int(node)) + + def construct_python_complex(self, node): + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + return tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python module", mark, + "expected non-empty name appended to the tag", mark) + try: + __import__(name) + except ImportError, exc: + raise ConstructorError("while constructing a Python module", mark, + "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark) + return 
sys.modules[name] + + def find_python_name(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python object", mark, + "expected non-empty name appended to the tag", mark) + if u'.' in name: + module_name, object_name = name.rsplit('.', 1) + else: + module_name = '__builtin__' + object_name = name + try: + __import__(module_name) + except ImportError, exc: + raise ConstructorError("while constructing a Python object", mark, + "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark) + module = sys.modules[module_name] + if not hasattr(module, object_name): + raise ConstructorError("while constructing a Python object", mark, + "cannot find %r in the module %r" % (object_name.encode('utf-8'), + module.__name__), mark) + return getattr(module, object_name) + + def construct_python_name(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python name", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python module", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_module(suffix, node.start_mark) + + class classobj: pass + + def make_python_instance(self, suffix, node, + args=None, kwds=None, newobj=False): + if not args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if newobj and isinstance(cls, type(self.classobj)) \ + and not args and not kwds: + instance = self.classobj() + instance.__class__ = cls + return instance + elif newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, 
instance, state): + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(object, key, value) + + def construct_python_object(self, suffix, node): + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. 
+ if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} + state = {} + listitems = [] + dictitems = {} + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if state: + self.set_python_instance_state(instance, state) + if listitems: + instance.extend(listitems) + if dictitems: + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + return self.construct_python_object_apply(suffix, node, newobj=True) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/none', + Constructor.construct_yaml_null) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/bool', + Constructor.construct_yaml_bool) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/str', + Constructor.construct_python_str) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/unicode', + Constructor.construct_python_unicode) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/int', + Constructor.construct_yaml_int) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/long', + Constructor.construct_python_long) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/float', + Constructor.construct_yaml_float) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/complex', + Constructor.construct_python_complex) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/list', + Constructor.construct_yaml_seq) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/tuple', + Constructor.construct_python_tuple) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/dict', + Constructor.construct_yaml_map) + +Constructor.add_multi_constructor( + 
u'tag:yaml.org,2002:python/name:', + Constructor.construct_python_name) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/module:', + Constructor.construct_python_module) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object:', + Constructor.construct_python_object) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/apply:', + Constructor.construct_python_object_apply) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/new:', + Constructor.construct_python_object_new) + diff --git a/scripts/clang-tidy/7.0.0/yaml/cyaml.py b/scripts/clang-tidy/7.0.0/yaml/cyaml.py new file mode 100644 index 000000000..68dcd7519 --- /dev/null +++ b/scripts/clang-tidy/7.0.0/yaml/cyaml.py @@ -0,0 +1,85 @@ + +__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', + 'CBaseDumper', 'CSafeDumper', 'CDumper'] + +from _yaml import CParser, CEmitter + +from constructor import * + +from serializer import * +from representer import * + +from resolver import * + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class CSafeLoader(CParser, SafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class CLoader(CParser, Constructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + Constructor.__init__(self) + Resolver.__init__(self) + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + 
allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CDumper(CEmitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/7.0.0/yaml/dumper.py b/scripts/clang-tidy/7.0.0/yaml/dumper.py new file mode 100644 index 000000000..f811d2c91 --- /dev/null +++ b/scripts/clang-tidy/7.0.0/yaml/dumper.py @@ -0,0 +1,62 @@ + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] + +from emitter import * +from serializer import * +from representer import * +from resolver 
import * + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class Dumper(Emitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + 
version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/7.0.0/yaml/emitter.py b/scripts/clang-tidy/7.0.0/yaml/emitter.py new file mode 100644 index 000000000..e5bcdcccb --- /dev/null +++ b/scripts/clang-tidy/7.0.0/yaml/emitter.py @@ -0,0 +1,1140 @@ + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +__all__ = ['Emitter', 'EmitterError'] + +from error import YAMLError +from events import * + +class EmitterError(YAMLError): + pass + +class ScalarAnalysis(object): + def __init__(self, scalar, empty, multiline, + allow_flow_plain, allow_block_plain, + allow_single_quoted, allow_double_quoted, + allow_block): + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + +class Emitter(object): + + DEFAULT_TAG_PREFIXES = { + u'!' : u'!', + u'tag:yaml.org,2002:' : u'!!', + } + + def __init__(self, stream, canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + + # The stream should have the methods `write` and possibly `flush`. + self.stream = stream + + # Encoding can be overriden by STREAM-START. + self.encoding = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] + self.state = self.expect_stream_start + + # Current event and the event queue. + self.events = [] + self.event = None + + # The current indentation level and the stack of previous indents. 
+ self.indents = [] + self.indent = None + + # Flow level. + self.flow_level = 0 + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + + # Whether the document requires an explicit document indicator + self.open_ended = False + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + self.best_indent = 2 + if indent and 1 < indent < 10: + self.best_indent = indent + self.best_width = 80 + if width and width > self.best_indent*2: + self.best_width = width + self.best_line_break = u'\n' + if line_break in [u'\r', u'\n', u'\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None + + # Prepared anchor and tag. + self.prepared_anchor = None + self.prepared_tag = None + + # Scalar analysis and style. + self.analysis = None + self.style = None + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. 
+ + def need_more_events(self): + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return (len(self.events) < count+1) + + def increase_indent(self, flow=False, indentless=False): + self.indents.append(self.indent) + if self.indent is None: + if flow: + self.indent = self.best_indent + else: + self.indent = 0 + elif not indentless: + self.indent += self.best_indent + + # States. + + # Stream handlers. + + def expect_stream_start(self): + if isinstance(self.event, StreamStartEvent): + if self.event.encoding and not getattr(self.stream, 'encoding', None): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError("expected StreamStartEvent, but got %s" + % self.event) + + def expect_nothing(self): + raise EmitterError("expected nothing, but got %s" % self.event) + + # Document handlers. 
+ + def expect_first_document_start(self): + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = self.event.tags.keys() + handles.sort() + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = (first and not self.event.explicit and not self.canonical + and not self.event.version and not self.event.tags + and not self.check_empty_document()) + if not implicit: + self.write_indent() + self.write_indicator(u'---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError("expected DocumentStartEvent, but got %s" + % self.event) + + def expect_document_end(self): + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator(u'...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError("expected DocumentEndEvent, but got %s" + % self.event) + + def expect_document_root(self): + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. 
+ + def expect_node(self, root=False, sequence=False, mapping=False, + simple_key=False): + self.root_context = root + self.sequence_context = sequence + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + self.process_anchor(u'&') + self.process_tag() + if isinstance(self.event, ScalarEvent): + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_sequence(): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_mapping(): + self.expect_flow_mapping() + else: + self.expect_block_mapping() + else: + raise EmitterError("expected NodeEvent, but got %s" % self.event) + + def expect_alias(self): + if self.event.anchor is None: + raise EmitterError("anchor is not specified for alias") + self.process_anchor(u'*') + self.state = self.states.pop() + + def expect_scalar(self): + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
+ + def expect_flow_sequence(self): + self.write_indicator(u'[', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. 
+ + def expect_flow_mapping(self): + self.write_indicator(u'{', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(u':', True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. 
+ + def expect_block_sequence(self): + indentless = (self.mapping_context and not self.indention) + self.increase_indent(flow=False, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + if not first and isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + self.write_indicator(u'-', True, indention=True) + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. + + def expect_block_mapping(self): + self.increase_indent(flow=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + if not first and isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + if self.check_simple_key(): + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + self.write_indent() + self.write_indicator(u':', True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. 
+ + def check_empty_sequence(self): + return (isinstance(self.event, SequenceStartEvent) and self.events + and isinstance(self.events[0], SequenceEndEvent)) + + def check_empty_mapping(self): + return (isinstance(self.event, MappingStartEvent) and self.events + and isinstance(self.events[0], MappingEndEvent)) + + def check_empty_document(self): + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return (isinstance(event, ScalarEvent) and event.anchor is None + and event.tag is None and event.implicit and event.value == u'') + + def check_simple_key(self): + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ + and self.event.tag is not None: + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return (length < 128 and (isinstance(self.event, AliasEvent) + or (isinstance(self.event, ScalarEvent) + and not self.analysis.empty and not self.analysis.multiline) + or self.check_empty_sequence() or self.check_empty_mapping())) + + # Anchor, Tag, and Scalar processors. 
+ + def process_anchor(self, indicator): + if self.event.anchor is None: + self.prepared_anchor = None + return + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator+self.prepared_anchor, True) + self.prepared_anchor = None + + def process_tag(self): + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ((not self.canonical or tag is None) and + ((self.style == '' and self.event.implicit[0]) + or (self.style != '' and self.event.implicit[1]))): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = u'!' + self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError("tag is not specified") + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + self.prepared_tag = None + + def choose_scalar_style(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if not self.event.style and self.event.implicit[0]: + if (not (self.simple_key_context and + (self.analysis.empty or self.analysis.multiline)) + and (self.flow_level and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain))): + return '' + if self.event.style and self.event.style in '|>': + if (not self.flow_level and not self.simple_key_context + and self.analysis.allow_block): + return self.event.style + if not self.event.style or self.event.style == '\'': + if (self.analysis.allow_single_quoted and + not (self.simple_key_context and self.analysis.multiline)): + return '\'' + return '"' + + def process_scalar(self): + if self.analysis is None: + self.analysis 
= self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = (not self.simple_key_context) + #if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == '\'': + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + + # Analyzers. + + def prepare_version(self, version): + major, minor = version + if major != 1: + raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) + return u'%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + if not handle: + raise EmitterError("tag handle must not be empty") + if handle[0] != u'!' 
or handle[-1] != u'!': + raise EmitterError("tag handle must start and end with '!': %r" + % (handle.encode('utf-8'))) + for ch in handle[1:-1]: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the tag handle: %r" + % (ch.encode('utf-8'), handle.encode('utf-8'))) + return handle + + def prepare_tag_prefix(self, prefix): + if not prefix: + raise EmitterError("tag prefix must not be empty") + chunks = [] + start = end = 0 + if prefix[0] == u'!': + end = 1 + while end < len(prefix): + ch = prefix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?!:@&=+$,_.~*\'()[]': + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(prefix[start:end]) + return u''.join(chunks) + + def prepare_tag(self, tag): + if not tag: + raise EmitterError("tag must not be empty") + if tag == u'!': + return tag + handle = None + suffix = tag + prefixes = self.tag_prefixes.keys() + prefixes.sort() + for prefix in prefixes: + if tag.startswith(prefix) \ + and (prefix == u'!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix):] + chunks = [] + start = end = 0 + while end < len(suffix): + ch = suffix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.~*\'()[]' \ + or (ch == u'!' 
and handle != u'!'): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = u''.join(chunks) + if handle: + return u'%s%s' % (handle, suffix_text) + else: + return u'!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + if not anchor: + raise EmitterError("anchor must not be empty") + for ch in anchor: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the anchor: %r" + % (ch.encode('utf-8'), anchor.encode('utf-8'))) + return anchor + + def analyze_scalar(self, scalar): + + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, + allow_flow_plain=False, allow_block_plain=True, + allow_single_quoted=True, allow_double_quoted=True, + allow_block=False) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith(u'---') or scalar.startswith(u'...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceeded_by_whitespace = True + + # Last character or followed by a whitespace. + followed_by_whitespace = (len(scalar) == 1 or + scalar[1] in u'\0 \t\r\n\x85\u2028\u2029') + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. 
+ if ch in u'#,[]{}&*!|>\'\"%@`': + flow_indicators = True + block_indicators = True + if ch in u'?:': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in u',?[]{}': + flow_indicators = True + if ch == u':': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'#' and preceeded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in u'\n\x85\u2028\u2029': + line_breaks = True + if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): + if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': + unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. + if ch == u' ': + if index == 0: + leading_space = True + if index == len(scalar)-1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in u'\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar)-1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029') + followed_by_whitespace = (index+1 >= len(scalar) or + scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029') + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. 
+ if (leading_space or leading_break + or trailing_space or trailing_break): + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if space_break or special_characters: + allow_flow_plain = allow_block_plain = \ + allow_single_quoted = allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis(scalar=scalar, + empty=False, multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block) + + # Writers. + + def flush_stream(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # Write BOM if needed. 
+ if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write(u'\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, + whitespace=False, indention=False): + if self.whitespace or not need_whitespace: + data = indicator + else: + data = u' '+indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + indent = self.indent or 0 + if not self.indention or self.column > indent \ + or (self.column == indent and not self.whitespace): + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = u' '*(indent-self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + data = u'%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + data = u'%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. 
+ + def write_single_quoted(self, text, split=True): + self.write_indicator(u'\'', True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != u' ': + if start+1 == end and self.column > self.best_width and split \ + and start != 0 and end != len(text): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'': + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == u'\'': + data = u'\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + self.write_indicator(u'\'', False) + + ESCAPE_REPLACEMENTS = { + u'\0': u'0', + u'\x07': u'a', + u'\x08': u'b', + u'\x09': u't', + u'\x0A': u'n', + u'\x0B': u'v', + u'\x0C': u'f', + u'\x0D': u'r', + u'\x1B': u'e', + u'\"': u'\"', + u'\\': u'\\', + u'\x85': u'N', + u'\xA0': u'_', + u'\u2028': u'L', + u'\u2029': u'P', + } + + def write_double_quoted(self, text, split=True): + self.write_indicator(u'"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \ + or not (u'\x20' <= ch <= u'\x7E' + or (self.allow_unicode + and (u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD'))): + if 
start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = u'\\'+self.ESCAPE_REPLACEMENTS[ch] + elif ch <= u'\xFF': + data = u'\\x%02X' % ord(ch) + elif ch <= u'\uFFFF': + data = u'\\u%04X' % ord(ch) + else: + data = u'\\U%08X' % ord(ch) + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end+1 + if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \ + and self.column+(end-start) > self.best_width and split: + data = text[start:end]+u'\\' + if start < end: + start = end + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == u' ': + data = u'\\' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator(u'"', False) + + def determine_block_hints(self, text): + hints = u'' + if text: + if text[0] in u' \n\x85\u2028\u2029': + hints += unicode(self.best_indent) + if text[-1] not in u'\n\x85\u2028\u2029': + hints += u'-' + elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029': + hints += u'+' + return hints + + def write_folded(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'>'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if not leading_space and ch is not None and ch != u' ' \ + and text[start] == u'\n': + self.write_line_break() + leading_space = (ch == u' ') + for br in text[start:end]: + if br == u'\n': + 
self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + spaces = (ch == u' ') + end += 1 + + def write_literal(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'|'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + else: + if ch is None or ch in u'\n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + + def write_plain(self, text, split=True): + if self.root_context: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = u' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = 
text[end] + if spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + diff --git a/scripts/clang-tidy/7.0.0/yaml/error.py b/scripts/clang-tidy/7.0.0/yaml/error.py new file mode 100644 index 000000000..577686db5 --- /dev/null +++ b/scripts/clang-tidy/7.0.0/yaml/error.py @@ -0,0 +1,75 @@ + +__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] + +class Mark(object): + + def __init__(self, name, index, line, column, buffer, pointer): + self.name = name + self.index = index + self.line = line + self.column = column + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + if self.buffer is None: + return None + head = '' + start = self.pointer + while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer-start > max_length/2-1: + head = ' ... ' + start += 5 + break + tail = '' + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': + end += 1 + if end-self.pointer > max_length/2-1: + tail = ' ... 
' + end -= 5 + break + snippet = self.buffer[start:end].encode('utf-8') + return ' '*indent + head + snippet + tail + '\n' \ + + ' '*(indent+self.pointer-start+len(head)) + '^' + + def __str__(self): + snippet = self.get_snippet() + where = " in \"%s\", line %d, column %d" \ + % (self.name, self.line+1, self.column+1) + if snippet is not None: + where += ":\n"+snippet + return where + +class YAMLError(Exception): + pass + +class MarkedYAMLError(YAMLError): + + def __init__(self, context=None, context_mark=None, + problem=None, problem_mark=None, note=None): + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + + def __str__(self): + lines = [] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None \ + and (self.problem is None or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None: + lines.append(self.note) + return '\n'.join(lines) + diff --git a/scripts/clang-tidy/7.0.0/yaml/events.py b/scripts/clang-tidy/7.0.0/yaml/events.py new file mode 100644 index 000000000..f79ad389c --- /dev/null +++ b/scripts/clang-tidy/7.0.0/yaml/events.py @@ -0,0 +1,86 @@ + +# Abstract classes. 
+ +class Event(object): + def __init__(self, start_mark=None, end_mark=None): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] + if hasattr(self, key)] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +class NodeEvent(Event): + def __init__(self, anchor, start_mark=None, end_mark=None): + self.anchor = anchor + self.start_mark = start_mark + self.end_mark = end_mark + +class CollectionStartEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, + flow_style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class CollectionEndEvent(Event): + pass + +# Implementations. + +class StreamStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndEvent(Event): + pass + +class DocumentStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None, version=None, tags=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + self.version = version + self.tags = tags + +class DocumentEndEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + +class AliasEvent(NodeEvent): + pass + +class ScalarEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, value, + start_mark=None, end_mark=None, style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class SequenceStartEvent(CollectionStartEvent): + pass 
+ +class SequenceEndEvent(CollectionEndEvent): + pass + +class MappingStartEvent(CollectionStartEvent): + pass + +class MappingEndEvent(CollectionEndEvent): + pass + diff --git a/scripts/clang-tidy/7.0.0/yaml/loader.py b/scripts/clang-tidy/7.0.0/yaml/loader.py new file mode 100644 index 000000000..293ff467b --- /dev/null +++ b/scripts/clang-tidy/7.0.0/yaml/loader.py @@ -0,0 +1,40 @@ + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] + +from reader import * +from scanner import * +from parser import * +from composer import * +from constructor import * +from resolver import * + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/7.0.0/yaml/nodes.py b/scripts/clang-tidy/7.0.0/yaml/nodes.py new file mode 100644 index 000000000..c4f070c41 --- /dev/null +++ b/scripts/clang-tidy/7.0.0/yaml/nodes.py @@ -0,0 +1,49 @@ + +class Node(object): + def __init__(self, tag, value, start_mark, end_mark): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + value = self.value + #if isinstance(value, list): + # if len(value) == 0: + # value = '' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = 
'<%d items>' % len(value) + #else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... ') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + +class ScalarNode(Node): + id = 'scalar' + def __init__(self, tag, value, + start_mark=None, end_mark=None, style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class CollectionNode(Node): + def __init__(self, tag, value, + start_mark=None, end_mark=None, flow_style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class SequenceNode(CollectionNode): + id = 'sequence' + +class MappingNode(CollectionNode): + id = 'mapping' + diff --git a/scripts/clang-tidy/7.0.0/yaml/parser.py b/scripts/clang-tidy/7.0.0/yaml/parser.py new file mode 100644 index 000000000..f9e3057f3 --- /dev/null +++ b/scripts/clang-tidy/7.0.0/yaml/parser.py @@ -0,0 +1,589 @@ + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? 
+# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# +# FIRST sets: +# +# stream: { STREAM-START } +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR 
TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } + +__all__ = ['Parser', 'ParserError'] + +from error import MarkedYAMLError +from tokens import * +from events import * +from scanner import * + +class ParserError(MarkedYAMLError): + pass + +class Parser(object): + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + + DEFAULT_TAGS = { + u'!': u'!', + u'!!': u'tag:yaml.org,2002:', + } + + def __init__(self): + self.current_event = None + self.yaml_version = None + self.tag_handles = {} + self.states = [] + self.marks = [] + self.state = self.parse_stream_start + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def check_event(self, *choices): + # Check the type of the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # Get the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + return self.current_event + + def get_event(self): + # Get the next event and proceed further. + if self.current_event is None: + if self.state: + self.current_event = self.state() + value = self.current_event + self.current_event = None + return value + + # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END + # implicit_document ::= block_node DOCUMENT-END* + # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* + + def parse_stream_start(self): + + # Parse the stream start. + token = self.get_token() + event = StreamStartEvent(token.start_mark, token.end_mark, + encoding=token.encoding) + + # Prepare the next state. 
+ self.state = self.parse_implicit_document_start + + return event + + def parse_implicit_document_start(self): + + # Parse an implicit document. + if not self.check_token(DirectiveToken, DocumentStartToken, + StreamEndToken): + self.tag_handles = self.DEFAULT_TAGS + token = self.peek_token() + start_mark = end_mark = token.start_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=False) + + # Prepare the next state. + self.states.append(self.parse_document_end) + self.state = self.parse_block_node + + return event + + else: + return self.parse_document_start() + + def parse_document_start(self): + + # Parse any extra document end indicators. + while self.check_token(DocumentEndToken): + self.get_token() + + # Parse an explicit document. + if not self.check_token(StreamEndToken): + token = self.peek_token() + start_mark = token.start_mark + version, tags = self.process_directives() + if not self.check_token(DocumentStartToken): + raise ParserError(None, None, + "expected '', but found %r" + % self.peek_token().id, + self.peek_token().start_mark) + token = self.get_token() + end_mark = token.end_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=True, version=version, tags=tags) + self.states.append(self.parse_document_end) + self.state = self.parse_document_content + else: + # Parse the end of the stream. + token = self.get_token() + event = StreamEndEvent(token.start_mark, token.end_mark) + assert not self.states + assert not self.marks + self.state = None + return event + + def parse_document_end(self): + + # Parse the document end. + token = self.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.check_token(DocumentEndToken): + token = self.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, + explicit=explicit) + + # Prepare the next state. 
+ self.state = self.parse_document_start + + return event + + def parse_document_content(self): + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + event = self.process_empty_scalar(self.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == u'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == u'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle.encode('utf-8'), + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? 
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + if self.check_token(AliasToken): + token = self.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) + self.state = self.states.pop() + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle.encode('utf-8'), + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == u'!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.peek_token().start_mark + event = None + implicit = (tag is None or tag == u'!') + if indentless_sequence and 
self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + self.state = self.parse_indentless_sequence_entry + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == u'!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + self.state = self.states.pop() + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_sequence_first_entry + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_mapping_first_key + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_sequence_first_entry + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_mapping_first_key + elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. 
+ event = ScalarEvent(anchor, tag, (implicit, False), u'', + start_mark, end_mark) + self.state = self.states.pop() + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.peek_token() + raise ParserError("while parsing a %s node" % node, start_mark, + "expected the node content, but found %r" % token.id, + token.start_mark) + return event + + # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END + + def parse_block_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_sequence_entry() + + def parse_block_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, BlockEndToken): + self.states.append(self.parse_block_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_block_sequence_entry + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block collection", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ + + def parse_indentless_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, + KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_indentless_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_indentless_sequence_entry + return self.process_empty_scalar(token.end_mark) + token = self.peek_token() + event = SequenceEndEvent(token.start_mark, token.start_mark) + self.state = self.states.pop() + return event + + # block_mapping ::= BLOCK-MAPPING_START + # ((KEY 
block_node_or_indentless_sequence?)? + # (VALUE block_node_or_indentless_sequence?)?)* + # BLOCK-END + + def parse_block_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_mapping_key() + + def parse_block_mapping_key(self): + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_value) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block mapping", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_block_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_key) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_block_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + # flow_sequence ::= FLOW-SEQUENCE-START + # (flow_sequence_entry FLOW-ENTRY)* + # flow_sequence_entry? + # FLOW-SEQUENCE-END + # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + # + # Note that while production rules for both flow_sequence_entry and + # flow_mapping_entry are equal, their interpretations are different. + # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` + # generate an inline mapping (set syntax). 
+ + def parse_flow_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + if not self.check_token(FlowSequenceEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow sequence", self.marks[-1], + "expected ',' or ']', but got %r" % token.id, token.start_mark) + + if self.check_token(KeyToken): + token = self.peek_token() + event = MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_sequence_entry_mapping_end(self): + self.state = self.parse_flow_sequence_entry + token = self.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + + def parse_flow_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_mapping_key(first=True) + + def parse_flow_mapping_key(self, first=False): + if not self.check_token(FlowMappingEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow mapping", self.marks[-1], + "expected ',' or '}', but got %r" % token.id, token.start_mark) + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(token.end_mark) + elif not self.check_token(FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_empty_value) + return self.parse_flow_node() + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_key) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_mapping_empty_value(self): + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(self.peek_token().start_mark) + + def process_empty_scalar(self, mark): + return ScalarEvent(None, None, (True, False), u'', mark, mark) + diff --git a/scripts/clang-tidy/7.0.0/yaml/reader.py b/scripts/clang-tidy/7.0.0/yaml/reader.py new file mode 100644 index 000000000..3249e6b9f --- /dev/null +++ b/scripts/clang-tidy/7.0.0/yaml/reader.py @@ -0,0 +1,190 @@ +# This module contains abstractions for the input stream. You don't have to +# looks further, there are no pretty code. +# +# We define two classes here. +# +# Mark(source, line, column) +# It's just a record and its only use is producing nice error messages. +# Parser does not use it for any other purposes. +# +# Reader(source, data) +# Reader determines the encoding of `data` and converts it to unicode. +# Reader provides the following methods and attributes: +# reader.peek(length=1) - return the next `length` characters +# reader.forward(length=1) - move the current position to `length` characters. +# reader.index - the number of the current character. +# reader.line, stream.column - the line and the column of the current character. 
+ +__all__ = ['Reader', 'ReaderError'] + +from error import YAMLError, Mark + +import codecs, re + +class ReaderError(YAMLError): + + def __init__(self, name, position, character, encoding, reason): + self.name = name + self.character = character + self.position = position + self.encoding = encoding + self.reason = reason + + def __str__(self): + if isinstance(self.character, str): + return "'%s' codec can't decode byte #x%02x: %s\n" \ + " in \"%s\", position %d" \ + % (self.encoding, ord(self.character), self.reason, + self.name, self.position) + else: + return "unacceptable character #x%04x: %s\n" \ + " in \"%s\", position %d" \ + % (self.character, self.reason, + self.name, self.position) + +class Reader(object): + # Reader: + # - determines the data encoding and converts it to unicode, + # - checks if characters are in allowed range, + # - adds '\0' to the end. + + # Reader accepts + # - a `str` object, + # - a `unicode` object, + # - a file-like object with its `read` method returning `str`, + # - a file-like object with its `read` method returning `unicode`. + + # Yeah, it's ugly and slow. 
+ + def __init__(self, stream): + self.name = None + self.stream = None + self.stream_pointer = 0 + self.eof = True + self.buffer = u'' + self.pointer = 0 + self.raw_buffer = None + self.raw_decode = None + self.encoding = None + self.index = 0 + self.line = 0 + self.column = 0 + if isinstance(stream, unicode): + self.name = "" + self.check_printable(stream) + self.buffer = stream+u'\0' + elif isinstance(stream, str): + self.name = "" + self.raw_buffer = stream + self.determine_encoding() + else: + self.stream = stream + self.name = getattr(stream, 'name', "") + self.eof = False + self.raw_buffer = '' + self.determine_encoding() + + def peek(self, index=0): + try: + return self.buffer[self.pointer+index] + except IndexError: + self.update(index+1) + return self.buffer[self.pointer+index] + + def prefix(self, length=1): + if self.pointer+length >= len(self.buffer): + self.update(length) + return self.buffer[self.pointer:self.pointer+length] + + def forward(self, length=1): + if self.pointer+length+1 >= len(self.buffer): + self.update(length+1) + while length: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch in u'\n\x85\u2028\u2029' \ + or (ch == u'\r' and self.buffer[self.pointer] != u'\n'): + self.line += 1 + self.column = 0 + elif ch != u'\uFEFF': + self.column += 1 + length -= 1 + + def get_mark(self): + if self.stream is None: + return Mark(self.name, self.index, self.line, self.column, + self.buffer, self.pointer) + else: + return Mark(self.name, self.index, self.line, self.column, + None, None) + + def determine_encoding(self): + while not self.eof and len(self.raw_buffer) < 2: + self.update_raw() + if not isinstance(self.raw_buffer, unicode): + if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): + self.raw_decode = codecs.utf_16_le_decode + self.encoding = 'utf-16-le' + elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): + self.raw_decode = codecs.utf_16_be_decode + self.encoding = 'utf-16-be' + else: + self.raw_decode = 
codecs.utf_8_decode + self.encoding = 'utf-8' + self.update(1) + + NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') + def check_printable(self, data): + match = self.NON_PRINTABLE.search(data) + if match: + character = match.group() + position = self.index+(len(self.buffer)-self.pointer)+match.start() + raise ReaderError(self.name, position, ord(character), + 'unicode', "special characters are not allowed") + + def update(self, length): + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer:] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, + 'strict', self.eof) + except UnicodeDecodeError, exc: + character = exc.object[exc.start] + if self.stream is not None: + position = self.stream_pointer-len(self.raw_buffer)+exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, + exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += u'\0' + self.raw_buffer = None + break + + def update_raw(self, size=1024): + data = self.stream.read(size) + if data: + self.raw_buffer += data + self.stream_pointer += len(data) + else: + self.eof = True + +#try: +# import psyco +# psyco.bind(Reader) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/7.0.0/yaml/representer.py b/scripts/clang-tidy/7.0.0/yaml/representer.py new file mode 100644 index 000000000..4ea8cb1fe --- /dev/null +++ b/scripts/clang-tidy/7.0.0/yaml/representer.py @@ -0,0 +1,486 @@ + +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError'] + +from error import * +from nodes import * + +import datetime + +import sys, copy_reg, types + +class RepresenterError(YAMLError): + pass + +class 
BaseRepresenter(object): + + yaml_representers = {} + yaml_multi_representers = {} + + def __init__(self, default_style=None, default_flow_style=None): + self.default_style = default_style + self.default_flow_style = default_flow_style + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent(self, data): + node = self.represent_data(data) + self.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def get_classobj_bases(self, cls): + bases = [cls] + for base in cls.__bases__: + bases.extend(self.get_classobj_bases(base)) + return bases + + def represent_data(self, data): + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + #if node is None: + # raise RepresenterError("recursive objects are not allowed: %r" % data) + return node + #self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if type(data) is types.InstanceType: + data_types = self.get_classobj_bases(data.__class__)+list(data_types) + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, unicode(data)) + #if alias_key is not None: + # self.represented_objects[alias_key] = node + return node + + def add_representer(cls, data_type, representer): + if not 'yaml_representers' in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + 
cls.yaml_representers[data_type] = representer + add_representer = classmethod(add_representer) + + def add_multi_representer(cls, data_type, representer): + if not 'yaml_multi_representers' in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + add_multi_representer = classmethod(add_multi_representer) + + def represent_scalar(self, tag, value, style=None): + if style is None: + style = self.default_style + node = ScalarNode(tag, value, style=style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + value = [] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + value = [] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = mapping.items() + mapping.sort() + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + 
else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + return False + +class SafeRepresenter(BaseRepresenter): + + def ignore_aliases(self, data): + if data is None: + return True + if isinstance(data, tuple) and data == (): + return True + if isinstance(data, (str, unicode, bool, int, float)): + return True + + def represent_none(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:null', + u'null') + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_bool(self, data): + if data: + value = u'true' + else: + value = u'false' + return self.represent_scalar(u'tag:yaml.org,2002:bool', value) + + def represent_int(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + def represent_long(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value*inf_value): + inf_value *= inf_value + + def represent_float(self, data): + if data != data or (data == 0.0 and data == 1.0): + value = u'.nan' + elif data == self.inf_value: + value = u'.inf' + elif data == -self.inf_value: + value = u'-.inf' + else: + value = unicode(repr(data)).lower() + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag. We fix this by adding + # '.0' before the 'e' symbol. + if u'.' 
not in value and u'e' in value: + value = value.replace(u'e', u'.0e', 1) + return self.represent_scalar(u'tag:yaml.org,2002:float', value) + + def represent_list(self, data): + #pairs = (len(data) > 0 and isinstance(data, list)) + #if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + #if not pairs: + return self.represent_sequence(u'tag:yaml.org,2002:seq', data) + #value = [] + #for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + #return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + return self.represent_mapping(u'tag:yaml.org,2002:map', data) + + def represent_set(self, data): + value = {} + for key in data: + value[key] = None + return self.represent_mapping(u'tag:yaml.org,2002:set', value) + + def represent_date(self, data): + value = unicode(data.isoformat()) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + value = unicode(data.isoformat(' ')) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + raise RepresenterError("cannot represent an object: %s" % data) + +SafeRepresenter.add_representer(type(None), + SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, + SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(unicode, + SafeRepresenter.represent_unicode) + +SafeRepresenter.add_representer(bool, + SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, + SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(long, + SafeRepresenter.represent_long) + 
+SafeRepresenter.add_representer(float, + SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, + SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, + SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, + SafeRepresenter.represent_undefined) + +class Representer(SafeRepresenter): + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:python/str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + tag = None + try: + data.encode('ascii') + tag = u'tag:yaml.org,2002:python/unicode' + except UnicodeEncodeError: + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data) + + def represent_long(self, data): + tag = u'tag:yaml.org,2002:int' + if int(data) is not data: + tag = u'tag:yaml.org,2002:python/long' + return self.represent_scalar(tag, unicode(data)) + + def represent_complex(self, data): + if data.imag == 0.0: + data = u'%r' % data.real + elif data.real == 0.0: + data = u'%rj' % data.imag + elif data.imag > 0: + data = u'%r+%rj' % (data.real, data.imag) + else: + data = u'%r%rj' % (data.real, data.imag) + return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) + + def represent_tuple(self, data): + return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + name = u'%s.%s' % 
(data.__module__, data.__name__) + return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'') + + def represent_module(self, data): + return self.represent_scalar( + u'tag:yaml.org,2002:python/module:'+data.__name__, u'') + + def represent_instance(self, data): + # For instances of classic classes, we use __getinitargs__ and + # __getstate__ to serialize the data. + + # If data.__getinitargs__ exists, the object must be reconstructed by + # calling cls(**args), where args is a tuple returned by + # __getinitargs__. Otherwise, the cls.__init__ method should never be + # called and the class instance is created by instantiating a trivial + # class and assigning to the instance's __class__ variable. + + # If data.__getstate__ exists, it returns the state of the object. + # Otherwise, the state of the object is data.__dict__. + + # We produce either a !!python/object or !!python/object/new node. + # If data.__getinitargs__ does not exist and state is a dictionary, we + # produce a !!python/object node . Otherwise we produce a + # !!python/object/new node. + + cls = data.__class__ + class_name = u'%s.%s' % (cls.__module__, cls.__name__) + args = None + state = None + if hasattr(data, '__getinitargs__'): + args = list(data.__getinitargs__()) + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__ + if args is None and isinstance(state, dict): + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+class_name, state) + if isinstance(state, dict) and not state: + return self.represent_sequence( + u'tag:yaml.org,2002:python/object/new:'+class_name, args) + value = {} + if args: + value['args'] = args + value['state'] = state + return self.represent_mapping( + u'tag:yaml.org,2002:python/object/new:'+class_name, value) + + def represent_object(self, data): + # We use __reduce__ API to save the data. 
data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. + + cls = type(data) + if cls in copy_reg.dispatch_table: + reduce = copy_reg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError("cannot represent object: %r" % data) + reduce = (list(reduce)+[None]*5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = u'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = u'tag:yaml.org,2002:python/object/apply:' + newobj = False + function_name = u'%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems \ + and isinstance(state, dict) and newobj: + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+function_name, state) + if not listitems and not dictitems \ + and isinstance(state, dict) and not state: + return self.represent_sequence(tag+function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag+function_name, value) 
+ +Representer.add_representer(str, + Representer.represent_str) + +Representer.add_representer(unicode, + Representer.represent_unicode) + +Representer.add_representer(long, + Representer.represent_long) + +Representer.add_representer(complex, + Representer.represent_complex) + +Representer.add_representer(tuple, + Representer.represent_tuple) + +Representer.add_representer(type, + Representer.represent_name) + +Representer.add_representer(types.ClassType, + Representer.represent_name) + +Representer.add_representer(types.FunctionType, + Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, + Representer.represent_name) + +Representer.add_representer(types.ModuleType, + Representer.represent_module) + +Representer.add_multi_representer(types.InstanceType, + Representer.represent_instance) + +Representer.add_multi_representer(object, + Representer.represent_object) + diff --git a/scripts/clang-tidy/7.0.0/yaml/resolver.py b/scripts/clang-tidy/7.0.0/yaml/resolver.py new file mode 100644 index 000000000..528fbc0ea --- /dev/null +++ b/scripts/clang-tidy/7.0.0/yaml/resolver.py @@ -0,0 +1,227 @@ + +__all__ = ['BaseResolver', 'Resolver'] + +from error import * +from nodes import * + +import re + +class ResolverError(YAMLError): + pass + +class BaseResolver(object): + + DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} + yaml_path_resolvers = {} + + def __init__(self): + self.resolver_exact_paths = [] + self.resolver_prefix_paths = [] + + def add_implicit_resolver(cls, tag, regexp, first): + if not 'yaml_implicit_resolvers' in cls.__dict__: + implicit_resolvers = {} + for key in cls.yaml_implicit_resolvers: + implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:] + cls.yaml_implicit_resolvers = implicit_resolvers + if first is None: + first = [None] + for ch in first: + 
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + add_implicit_resolver = classmethod(add_implicit_resolver) + + def add_path_resolver(cls, tag, path, kind=None): + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. 
+ if not 'yaml_path_resolvers' in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError("Invalid path element: %s" % element) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ + and not isinstance(node_check, basestring) \ + and node_check is not None: + raise ResolverError("Invalid node checker: %s" % node_check) + if not isinstance(index_check, (basestring, int)) \ + and index_check is not None: + raise ResolverError("Invalid index checker: %s" % index_check) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] \ + and kind is not None: + raise ResolverError("Invalid node kind: %s" % kind) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + add_path_resolver = classmethod(add_path_resolver) + + def descend_resolver(self, current_node, current_index): + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, + current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + 
prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, + current_node, current_index): + node_check, index_check = path[depth-1] + if isinstance(node_check, basestring): + if current_node.tag != node_check: + return + elif node_check is not None: + if not isinstance(current_node, node_check): + return + if index_check is True and current_index is not None: + return + if (index_check is False or index_check is None) \ + and current_index is None: + return + if isinstance(index_check, basestring): + if not (isinstance(current_index, ScalarNode) + and index_check == current_index.value): + return + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return + return True + + def resolve(self, kind, value, implicit): + if kind is ScalarNode and implicit[0]: + if value == u'': + resolvers = self.yaml_implicit_resolvers.get(u'', []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if self.yaml_path_resolvers: + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + +class Resolver(BaseResolver): + pass + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:bool', + re.compile(ur'''^(?:yes|Yes|YES|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + 
list(u'yYnNtTfFoO')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:float', + re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? + |\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:int', + re.compile(ur'''^(?:[-+]?0b[0-1_]+ + |[-+]?0[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), + list(u'-+0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:merge', + re.compile(ur'^(?:<<)$'), + [u'<']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:null', + re.compile(ur'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + [u'~', u'n', u'N', u'']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:timestamp', + re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? + (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list(u'0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:value', + re.compile(ur'^(?:=)$'), + [u'=']) + +# The following resolver is only for documentation purposes. It cannot work +# because plain scalars cannot start with '!', '&', or '*'. 
+Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:yaml', + re.compile(ur'^(?:!|&|\*)$'), + list(u'!&*')) + diff --git a/scripts/clang-tidy/7.0.0/yaml/scanner.py b/scripts/clang-tidy/7.0.0/yaml/scanner.py new file mode 100644 index 000000000..834f662a4 --- /dev/null +++ b/scripts/clang-tidy/7.0.0/yaml/scanner.py @@ -0,0 +1,1453 @@ + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# Read comments in the Scanner code for more details. +# + +__all__ = ['Scanner', 'ScannerError'] + +from error import MarkedYAMLError +from tokens import * + +class ScannerError(MarkedYAMLError): + pass + +class SimpleKey(object): + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + +class Scanner(object): + + def __init__(self): + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer. + + # Had we reached the end of the stream? + self.done = False + + # The number of unclosed '{' and '['. `flow_level == 0` means block + # context. + self.flow_level = 0 + + # List of processed tokens that are not yet emitted. 
+ self.tokens = [] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. + self.possible_simple_keys = {} + + # Public methods. + + def check_token(self, *choices): + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # Return the next token, but do not delete if from the queue. 
+ while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + return self.tokens[0] + + def get_token(self): + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. + + def need_more_tokens(self): + if self.done: + return False + if not self.tokens: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + + def fetch_more_tokens(self): + + # Eat whitespaces and comments until we reach the next token. + self.scan_to_next_token() + + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.column) + + # Peek the next character. + ch = self.peek() + + # Is it the end of stream? + if ch == u'\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == u'%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == u'-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == u'.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + #if ch == u'\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == u'[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == u'{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == u']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? 
+ if ch == u'}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == u',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == u'-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == u'?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == u':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == u'*': + return self.fetch_alias() + + # Is it an anchor? + if ch == u'&': + return self.fetch_anchor() + + # Is it a tag? + if ch == u'!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == u'|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == u'>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == u'\'': + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == u'\"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError("while scanning for the next token", None, + "found character %r that cannot start any token" + % ch.encode('utf-8'), self.get_mark()) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. 
We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). + for level in self.possible_simple_keys.keys(): + key = self.possible_simple_keys[level] + if key.line != self.line \ + or self.index-key.index > 1024: + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # The next token may start a simple key. We check if it's possible + # and save its position. This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.column + + # The next token might be a simple key. Let's save it's number and + # position. + if self.allow_simple_key: + self.remove_possible_simple_key() + token_number = self.tokens_taken+len(self.tokens) + key = SimpleKey(token_number, required, + self.index, self.line, self.column, self.get_mark()) + self.possible_simple_keys[self.flow_level] = key + + def remove_possible_simple_key(self): + # Remove the saved possible key position at the current flow level. 
+ if self.flow_level in self.possible_simple_keys: + key = self.possible_simple_keys[self.flow_level] + + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + + del self.possible_simple_keys[self.flow_level] + + # Indentation functions. + + def unwind_indent(self, column): + + ## In flow context, tokens should respect indentation. + ## Actually the condition should be `self.indent >= column` according to + ## the spec. But this condition will prohibit intuitively correct + ## constructions such as + ## key : { + ## } + #if self.flow_level and self.indent > column: + # raise ScannerError(None, None, + # "invalid intendation or unclosed '[' or '{'", + # self.get_mark()) + + # In the flow context, indentation is ignored. We make the scanner less + # restrictive then specification requires. + if self.flow_level: + return + + # In block context, we may need to issue the BLOCK-END tokens. + while self.indent > column: + mark = self.get_mark() + self.indent = self.indents.pop() + self.tokens.append(BlockEndToken(mark, mark)) + + def add_indent(self, column): + # Check if we need to increase indentation. + if self.indent < column: + self.indents.append(self.indent) + self.indent = column + return True + return False + + # Fetchers. + + def fetch_stream_start(self): + # We always add STREAM-START as the first token and STREAM-END as the + # last token. + + # Read the token. + mark = self.get_mark() + + # Add STREAM-START. + self.tokens.append(StreamStartToken(mark, mark, + encoding=self.encoding)) + + + def fetch_stream_end(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + self.possible_simple_keys = {} + + # Read the token. + mark = self.get_mark() + + # Add STREAM-END. + self.tokens.append(StreamEndToken(mark, mark)) + + # The steam is finished. 
+ self.done = True + + def fetch_directive(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Scan and add DIRECTIVE. + self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END. + start_mark = self.get_mark() + self.forward(3) + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_start(self): + self.fetch_flow_collection_start(FlowSequenceStartToken) + + def fetch_flow_mapping_start(self): + self.fetch_flow_collection_start(FlowMappingStartToken) + + def fetch_flow_collection_start(self, TokenClass): + + # '[' and '{' may start a simple key. + self.save_possible_simple_key() + + # Increase the flow level. + self.flow_level += 1 + + # Simple keys are allowed after '[' and '{'. + self.allow_simple_key = True + + # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_end(self): + self.fetch_flow_collection_end(FlowSequenceEndToken) + + def fetch_flow_mapping_end(self): + self.fetch_flow_collection_end(FlowMappingEndToken) + + def fetch_flow_collection_end(self, TokenClass): + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Decrease the flow level. + self.flow_level -= 1 + + # No simple keys after ']' or '}'. 
+ self.allow_simple_key = False + + # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_entry(self): + + # Simple keys are allowed after ','. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add FLOW-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError(None, None, + "sequence entries are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-SEQUENCE-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + + # Simple keys are allowed after '-'. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not nessesary a simple)? + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping keys are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-MAPPING-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. 
+ self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert(key.token_number-self.tokens_taken, + KeyToken(key.mark, key.mark)) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert(key.token_number-self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark)) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be catched by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping values are not allowed here", + self.get_mark()) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. 
+ start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + + # ALIAS could be a simple key. + self.save_possible_simple_key() + + # No simple keys after ALIAS. + self.allow_simple_key = False + + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + + # ANCHOR could start a simple key. + self.save_possible_simple_key() + + # No simple keys after ANCHOR. + self.allow_simple_key = False + + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + + # TAG could start a simple key. + self.save_possible_simple_key() + + # No simple keys after TAG. + self.allow_simple_key = False + + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + + # A simple key may follow a block scalar. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + self.fetch_flow_scalar(style='\'') + + def fetch_double(self): + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + + # A flow scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after flow scalars. + self.allow_simple_key = False + + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + + # A plain scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + + # Scan and add SCALAR. 
May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.column == 0: + return True + + def check_document_start(self): + + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'---' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_document_end(self): + + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'...' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_block_entry(self): + + # BLOCK-ENTRY: '-' (' '|'\n') + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_key(self): + + # KEY(flow context): '?' + if self.flow_level: + return True + + # KEY(block context): '?' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_value(self): + + # VALUE(flow context): ':' + if self.flow_level: + return True + + # VALUE(block context): ':' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_plain(self): + + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. + # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + ch = self.peek() + return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ + or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029' + and (ch == u'-' or (not self.flow_level and ch in u'?:'))) + + # Scanners. + + def scan_to_next_token(self): + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. 
+ # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + + if self.index == 0 and self.peek() == u'\uFEFF': + self.forward() + found = False + while not found: + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + + def scan_directive(self): + # See the specification for details. + start_mark = self.get_mark() + self.forward() + name = self.scan_directive_name(start_mark) + value = None + if name == u'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.get_mark() + elif name == u'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.get_mark() + else: + end_mark = self.get_mark() + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # See the specification for details. 
+ length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return value + + def scan_yaml_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + major = self.scan_yaml_directive_number(start_mark) + if self.peek() != '.': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or '.', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + minor = self.scan_yaml_directive_number(start_mark) + if self.peek() not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or ' ', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + return (major, minor) + + def scan_yaml_directive_number(self, start_mark): + # See the specification for details. + ch = self.peek() + if not (u'0' <= ch <= u'9'): + raise ScannerError("while scanning a directive", start_mark, + "expected a digit, but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 0 + while u'0' <= self.peek(length) <= u'9': + length += 1 + value = int(self.prefix(length)) + self.forward(length) + return value + + def scan_tag_directive_value(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + handle = self.scan_tag_directive_handle(start_mark) + while self.peek() == u' ': + self.forward() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.peek() + if ch != u' ': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_tag_directive_prefix(self, start_mark): + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_directive_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpteted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. 
+ start_mark = self.get_mark() + indicator = self.peek() + if indicator == u'*': + name = 'alias' + else: + name = 'anchor' + self.forward() + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`': + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + end_mark = self.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # See the specification for details. + start_mark = self.get_mark() + ch = self.peek(1) + if ch == u'<': + handle = None + self.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if self.peek() != u'>': + raise ScannerError("while parsing a tag", start_mark, + "expected '>', but found %r" % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + elif ch in u'\0 \t\r\n\x85\u2028\u2029': + handle = None + suffix = u'!' + self.forward() + else: + length = 1 + use_handle = False + while ch not in u'\0 \r\n\x85\u2028\u2029': + if ch == u'!': + use_handle = True + break + length += 1 + ch = self.peek(length) + handle = u'!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = u'!' 
+ self.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a tag", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + value = (handle, suffix) + end_mark = self.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style): + # See the specification for details. + + if style == '>': + folded = True + else: + folded = False + + chunks = [] + start_mark = self.get_mark() + + # Scan the header. + self.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. + min_indent = self.indent+1 + if min_indent < 1: + min_indent = 1 + if increment is None: + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + indent = min_indent+increment-1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = u'' + + # Scan the inner part of the block scalar. + while self.column == indent and self.peek() != u'\0': + chunks.extend(breaks) + leading_non_space = self.peek() not in u' \t' + length = 0 + while self.peek(length) not in u'\0\r\n\x85\u2028\u2029': + length += 1 + chunks.append(self.prefix(length)) + self.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if self.column == indent and self.peek() != u'\0': + + # Unfortunately, folding rules are ambiguous. 
+ # + # This is the folding according to the specification: + + if folded and line_break == u'\n' \ + and leading_non_space and self.peek() not in u' \t': + if not breaks: + chunks.append(u' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + #if folded and line_break == u'\n': + # if not breaks: + # if self.peek() not in ' \t': + # chunks.append(u' ') + # else: + # chunks.append(line_break) + #else: + # chunks.append(line_break) + else: + break + + # Chomp the tail. + if chomping is not False: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + + # We are done. + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + def scan_block_scalar_indicators(self, start_mark): + # See the specification for details. + chomping = None + increment = None + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + elif ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected chomping or indentation indicators, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_block_scalar_indentation(self): + # See the specification for details. + chunks = [] + max_indent = 0 + end_mark = self.get_mark() + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() != u' ': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + else: + self.forward() + if self.column > max_indent: + max_indent = self.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # See the specification for details. + chunks = [] + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + while self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + return chunks, end_mark + + def scan_flow_scalar(self, style): + # See the specification for details. + # Note that we loose indentation rules for quoted scalars. Quoted + # scalars don't need to adhere indentation because " and ' clearly + # mark the beginning and the end of them. Therefore we are less + # restrictive then the specification requires. We only need to check + # that document separators are not included in scalars. 
+ if style == '"': + double = True + else: + double = False + chunks = [] + start_mark = self.get_mark() + quote = self.peek() + self.forward() + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + while self.peek() != quote: + chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + self.forward() + end_mark = self.get_mark() + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + ESCAPE_REPLACEMENTS = { + u'0': u'\0', + u'a': u'\x07', + u'b': u'\x08', + u't': u'\x09', + u'\t': u'\x09', + u'n': u'\x0A', + u'v': u'\x0B', + u'f': u'\x0C', + u'r': u'\x0D', + u'e': u'\x1B', + u' ': u'\x20', + u'\"': u'\"', + u'\\': u'\\', + u'N': u'\x85', + u'_': u'\xA0', + u'L': u'\u2028', + u'P': u'\u2029', + } + + ESCAPE_CODES = { + u'x': 2, + u'u': 4, + u'U': 8, + } + + def scan_flow_scalar_non_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + length = 0 + while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029': + length += 1 + if length: + chunks.append(self.prefix(length)) + self.forward(length) + ch = self.peek() + if not double and ch == u'\'' and self.peek(1) == u'\'': + chunks.append(u'\'') + self.forward(2) + elif (double and ch == u'\'') or (not double and ch in u'\"\\'): + chunks.append(ch) + self.forward() + elif double and ch == u'\\': + self.forward() + ch = self.peek() + if ch in self.ESCAPE_REPLACEMENTS: + chunks.append(self.ESCAPE_REPLACEMENTS[ch]) + self.forward() + elif ch in self.ESCAPE_CODES: + length = self.ESCAPE_CODES[ch] + self.forward() + for k in range(length): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "expected escape sequence of %d hexdecimal numbers, but found %r" % + (length, self.peek(k).encode('utf-8')), self.get_mark()) + code = int(self.prefix(length), 16) + 
                    # Tail of scan_flow_scalar_non_spaces(): a numeric escape
                    # (\xXX, \uXXXX, \UXXXXXXXX) has just been parsed into `code`.
                    chunks.append(unichr(code))
                    self.forward(length)
                elif ch in u'\r\n\x85\u2028\u2029':
                    # An escaped line break: the break itself is removed and any
                    # following breaks are collected verbatim.
                    self.scan_line_break()
                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
                else:
                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
                            "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
            else:
                return chunks

    def scan_flow_scalar_spaces(self, double, start_mark):
        # See the specification for details.
        # Scan a run of spaces/tabs inside a flow scalar and apply YAML line
        # folding: a single '\n' folds to one space, other breaks are kept.
        chunks = []
        length = 0
        while self.peek(length) in u' \t':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch == u'\0':
            # End of stream inside a quoted scalar is always an error.
            raise ScannerError("while scanning a quoted scalar", start_mark,
                    "found unexpected end of stream", self.get_mark())
        elif ch in u'\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            breaks = self.scan_flow_scalar_breaks(double, start_mark)
            if line_break != u'\n':
                chunks.append(line_break)
            elif not breaks:
                # Exactly one '\n' folds to a single space.
                chunks.append(u' ')
            chunks.extend(breaks)
        else:
            chunks.append(whitespaces)
        return chunks

    def scan_flow_scalar_breaks(self, double, start_mark):
        # See the specification for details.
        # Collect consecutive line breaks inside a flow scalar; document
        # separators may not appear inside a quoted scalar.
        chunks = []
        while True:
            # Instead of checking indentation, we check for document
            # separators.
            prefix = self.prefix(3)
            if (prefix == u'---' or prefix == u'...') \
                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                raise ScannerError("while scanning a quoted scalar", start_mark,
                        "found unexpected document separator", self.get_mark())
            while self.peek() in u' \t':
                self.forward()
            if self.peek() in u'\r\n\x85\u2028\u2029':
                chunks.append(self.scan_line_break())
            else:
                return chunks

    def scan_plain(self):
        # See the specification for details.
        # We add an additional restriction for the flow context:
        #   plain scalars in the flow context cannot contain ',', ':' and '?'.
        # We also keep track of the `allow_simple_key` flag here.
        # Indentation rules are loosed for the flow context.
        chunks = []
        start_mark = self.get_mark()
        end_mark = start_mark
        indent = self.indent+1
        # We allow zero indentation for scalars, but then we need to check for
        # document separators at the beginning of the line.
        #if indent == 0:
        #    indent = 1
        spaces = []
        while True:
            length = 0
            # A comment terminates the plain scalar.
            if self.peek() == u'#':
                break
            while True:
                ch = self.peek(length)
                if ch in u'\0 \t\r\n\x85\u2028\u2029' \
                        or (not self.flow_level and ch == u':' and
                                self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
                        or (self.flow_level and ch in u',:?[]{}'):
                    break
                length += 1
            # It's not clear what we should do with ':' in the flow context.
            if (self.flow_level and ch == u':'
                    and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
                self.forward(length)
                raise ScannerError("while scanning a plain scalar", start_mark,
                    "found unexpected ':'", self.get_mark(),
                    "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
            if length == 0:
                break
            self.allow_simple_key = False
            chunks.extend(spaces)
            chunks.append(self.prefix(length))
            self.forward(length)
            end_mark = self.get_mark()
            spaces = self.scan_plain_spaces(indent, start_mark)
            if not spaces or self.peek() == u'#' \
                    or (not self.flow_level and self.column < indent):
                break
        return ScalarToken(u''.join(chunks), True, start_mark, end_mark)

    def scan_plain_spaces(self, indent, start_mark):
        # See the specification for details.
        # The specification is really confusing about tabs in plain scalars.
        # We just forbid them completely. Do not use tabs in YAML!
        # Returns the folded whitespace between plain-scalar fragments, or
        # None when a document separator ('---'/'...') ends the scalar.
        chunks = []
        length = 0
        while self.peek(length) in u' ':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch in u'\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            # A line break inside a plain scalar re-enables simple keys.
            self.allow_simple_key = True
            prefix = self.prefix(3)
            if (prefix == u'---' or prefix == u'...') \
                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                return
            breaks = []
            while self.peek() in u' \r\n\x85\u2028\u2029':
                if self.peek() == ' ':
                    self.forward()
                else:
                    breaks.append(self.scan_line_break())
                    prefix = self.prefix(3)
                    if (prefix == u'---' or prefix == u'...') \
                            and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                        return
            if line_break != u'\n':
                chunks.append(line_break)
            elif not breaks:
                chunks.append(u' ')
            chunks.extend(breaks)
        elif whitespaces:
            chunks.append(whitespaces)
        return chunks

    def scan_tag_handle(self, name, start_mark):
        # See the specification for details.
        # For some strange reasons, the specification does not allow '_' in
        # tag handles. I have allowed it anyway.
        # Scans '!', '!!' or '!word!' and returns it as a unicode string.
        ch = self.peek()
        if ch != u'!':
            raise ScannerError("while scanning a %s" % name, start_mark,
                    "expected '!', but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        length = 1
        ch = self.peek(length)
        if ch != u' ':
            while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
                    or ch in u'-_':
                length += 1
                ch = self.peek(length)
            if ch != u'!':
                self.forward(length)
                raise ScannerError("while scanning a %s" % name, start_mark,
                        "expected '!', but found %r" % ch.encode('utf-8'),
                        self.get_mark())
            length += 1
        value = self.prefix(length)
        self.forward(length)
        return value

    def scan_tag_uri(self, name, start_mark):
        # See the specification for details.
        # Note: we do not check if URI is well-formed.
        chunks = []
        length = 0
        ch = self.peek(length)
        while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
                or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
            if ch == u'%':
                # Flush the literal run, then decode the %XX escape sequence.
                chunks.append(self.prefix(length))
                self.forward(length)
                length = 0
                chunks.append(self.scan_uri_escapes(name, start_mark))
            else:
                length += 1
            ch = self.peek(length)
        if length:
            chunks.append(self.prefix(length))
            self.forward(length)
            length = 0
        if not chunks:
            raise ScannerError("while parsing a %s" % name, start_mark,
                    "expected URI, but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        return u''.join(chunks)

    def scan_uri_escapes(self, name, start_mark):
        # See the specification for details.
        # Decodes a run of %XX escapes as a UTF-8 byte sequence.
        bytes = []
        mark = self.get_mark()
        while self.peek() == u'%':
            self.forward()
            for k in range(2):
                if self.peek(k) not in u'0123456789ABCDEFabcdef':
                    raise ScannerError("while scanning a %s" % name, start_mark,
                            "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
                                (self.peek(k).encode('utf-8')), self.get_mark())
            bytes.append(chr(int(self.prefix(2), 16)))
            self.forward(2)
        try:
            value = unicode(''.join(bytes), 'utf-8')
        except UnicodeDecodeError, exc:
            raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
        return value

    def scan_line_break(self):
        # Transforms:
        #   '\r\n'      :   '\n'
        #   '\r'        :   '\n'
        #   '\n'        :   '\n'
        #   '\x85'      :   '\n'
        #   '\u2028'    :   '\u2028'
        #   '\u2029'    :   '\u2029'
        #   default     :   ''
        ch = self.peek()
        if ch in u'\r\n\x85':
            if self.prefix(2) == u'\r\n':
                self.forward(2)
            else:
                self.forward()
            return u'\n'
        elif ch in u'\u2028\u2029':
            self.forward()
            return ch
        return u''

#try:
#    import psyco
#    psyco.bind(Scanner)
#except ImportError:
#    pass

diff --git a/scripts/clang-tidy/7.0.0/yaml/serializer.py b/scripts/clang-tidy/7.0.0/yaml/serializer.py
new file mode 100644
index 000000000..0bf1e96dc
--- /dev/null
+++ b/scripts/clang-tidy/7.0.0/yaml/serializer.py
@@ -0,0 +1,111 @@
+__all__ = ['Serializer', 'SerializerError'] + +from error import YAMLError +from events import * +from nodes import * + +class SerializerError(YAMLError): + pass + +class Serializer(object): + + ANCHOR_TEMPLATE = u'id%03d' + + def __init__(self, encoding=None, + explicit_start=None, explicit_end=None, version=None, tags=None): + self.use_encoding = encoding + self.use_explicit_start = explicit_start + self.use_explicit_end = explicit_end + self.use_version = version + self.use_tags = tags + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + self.closed = None + + def open(self): + if self.closed is None: + self.emit(StreamStartEvent(encoding=self.use_encoding)) + self.closed = False + elif self.closed: + raise SerializerError("serializer is closed") + else: + raise SerializerError("serializer is already opened") + + def close(self): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif not self.closed: + self.emit(StreamEndEvent()) + self.closed = True + + #def __del__(self): + # self.close() + + def serialize(self, node): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif self.closed: + raise SerializerError("serializer is closed") + self.emit(DocumentStartEvent(explicit=self.use_explicit_start, + version=self.use_version, tags=self.use_tags)) + self.anchor_node(node) + self.serialize_node(node, None, None) + self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + + def anchor_node(self, node): + if node in self.anchors: + if self.anchors[node] is None: + self.anchors[node] = self.generate_anchor(node) + else: + self.anchors[node] = None + if isinstance(node, SequenceNode): + for item in node.value: + self.anchor_node(item) + elif isinstance(node, MappingNode): + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) + + def generate_anchor(self, node): + 
self.last_anchor_id += 1 + return self.ANCHOR_TEMPLATE % self.last_anchor_id + + def serialize_node(self, node, parent, index): + alias = self.anchors[node] + if node in self.serialized_nodes: + self.emit(AliasEvent(alias)) + else: + self.serialized_nodes[node] = True + self.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + detected_tag = self.resolve(ScalarNode, node.value, (True, False)) + default_tag = self.resolve(ScalarNode, node.value, (False, True)) + implicit = (node.tag == detected_tag), (node.tag == default_tag) + self.emit(ScalarEvent(alias, node.tag, implicit, node.value, + style=node.style)) + elif isinstance(node, SequenceNode): + implicit = (node.tag + == self.resolve(SequenceNode, node.value, True)) + self.emit(SequenceStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emit(SequenceEndEvent()) + elif isinstance(node, MappingNode): + implicit = (node.tag + == self.resolve(MappingNode, node.value, True)) + self.emit(MappingStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + self.emit(MappingEndEvent()) + self.ascend_resolver() + diff --git a/scripts/clang-tidy/7.0.0/yaml/tokens.py b/scripts/clang-tidy/7.0.0/yaml/tokens.py new file mode 100644 index 000000000..4d0b48a39 --- /dev/null +++ b/scripts/clang-tidy/7.0.0/yaml/tokens.py @@ -0,0 +1,104 @@ + +class Token(object): + def __init__(self, start_mark, end_mark): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in self.__dict__ + if not key.endswith('_mark')] + attributes.sort() + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +#class BOMToken(Token): +# id = '' + +class 
DirectiveToken(Token): + id = '' + def __init__(self, name, value, start_mark, end_mark): + self.name = name + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class DocumentStartToken(Token): + id = '' + +class DocumentEndToken(Token): + id = '' + +class StreamStartToken(Token): + id = '' + def __init__(self, start_mark=None, end_mark=None, + encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndToken(Token): + id = '' + +class BlockSequenceStartToken(Token): + id = '' + +class BlockMappingStartToken(Token): + id = '' + +class BlockEndToken(Token): + id = '' + +class FlowSequenceStartToken(Token): + id = '[' + +class FlowMappingStartToken(Token): + id = '{' + +class FlowSequenceEndToken(Token): + id = ']' + +class FlowMappingEndToken(Token): + id = '}' + +class KeyToken(Token): + id = '?' + +class ValueToken(Token): + id = ':' + +class BlockEntryToken(Token): + id = '-' + +class FlowEntryToken(Token): + id = ',' + +class AliasToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class AnchorToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class TagToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class ScalarToken(Token): + id = '' + def __init__(self, value, plain, start_mark, end_mark, style=None): + self.value = value + self.plain = plain + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + diff --git a/scripts/clang-tidy/7.0.1/.travis.yml b/scripts/clang-tidy/7.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang-tidy/7.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: 
  include:
  - os: osx
  - os: linux
    sudo: false

script:
- ./mason build ${MASON_NAME} ${MASON_VERSION}
- ./mason publish ${MASON_NAME} ${MASON_VERSION}
\ No newline at end of file
diff --git a/scripts/clang-tidy/7.0.1/README-yaml.md b/scripts/clang-tidy/7.0.1/README-yaml.md
new file mode 100644
index 000000000..2cc738ab7
--- /dev/null
+++ b/scripts/clang-tidy/7.0.1/README-yaml.md
@@ -0,0 +1,13 @@
This is a copy of `pyyaml-3.12` vendored on April 24, 2018 by @springmeyer.

https://github.com/mapbox/mason/issues/563 documents why.

The process to vendor was:

```
cd mason
pip install pyyaml --user
cp -r $(python -m site --user-site)/yaml scripts/clang-tidy/6.0.0/
```

Then the `clang-tidy` package was built and the `yaml` directory was copied beside the `share/run-clang-tidy.py` script (which depends on it).
\ No newline at end of file
diff --git a/scripts/clang-tidy/7.0.1/script.sh b/scripts/clang-tidy/7.0.1/script.sh
new file mode 100755
index 000000000..c21f18d8a
--- /dev/null
+++ b/scripts/clang-tidy/7.0.1/script.sh
@@ -0,0 +1,19 @@
#!/usr/bin/env bash

# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md

# dynamically determine the path to this package
HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )"

# dynamically take name of package from directory
MASON_NAME=$(basename $(dirname $HERE))
# dynamically take the version of the package from directory
MASON_VERSION=$(basename $HERE)
MASON_LIB_FILE=bin/${MASON_NAME}

.
${MASON_DIR}/mason.sh   # (continuation of `. ${MASON_DIR}/mason.sh`, split at chunk boundary)

# inherit all functions from base
source ${HERE}/../../${MASON_NAME}/base/common.sh

mason_run "$@"
diff --git a/scripts/clang-tidy/7.0.1/yaml/__init__.py b/scripts/clang-tidy/7.0.1/yaml/__init__.py
new file mode 100644
index 000000000..87c15d38a
--- /dev/null
+++ b/scripts/clang-tidy/7.0.1/yaml/__init__.py
@@ -0,0 +1,315 @@

from error import *

from tokens import *
from events import *
from nodes import *

from loader import *
from dumper import *

__version__ = '3.12'

# Use the LibYAML-backed loaders/dumpers when the C extension is available.
try:
    from cyaml import *
    __with_libyaml__ = True
except ImportError:
    __with_libyaml__ = False

def scan(stream, Loader=Loader):
    """
    Scan a YAML stream and produce scanning tokens.
    """
    loader = Loader(stream)
    try:
        while loader.check_token():
            yield loader.get_token()
    finally:
        loader.dispose()

def parse(stream, Loader=Loader):
    """
    Parse a YAML stream and produce parsing events.
    """
    loader = Loader(stream)
    try:
        while loader.check_event():
            yield loader.get_event()
    finally:
        loader.dispose()

def compose(stream, Loader=Loader):
    """
    Parse the first YAML document in a stream
    and produce the corresponding representation tree.
    """
    loader = Loader(stream)
    try:
        return loader.get_single_node()
    finally:
        loader.dispose()

def compose_all(stream, Loader=Loader):
    """
    Parse all YAML documents in a stream
    and produce corresponding representation trees.
    """
    loader = Loader(stream)
    try:
        while loader.check_node():
            yield loader.get_node()
    finally:
        loader.dispose()

def load(stream, Loader=Loader):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.
    """
    loader = Loader(stream)
    try:
        return loader.get_single_data()
    finally:
        loader.dispose()

def load_all(stream, Loader=Loader):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.
    """
    loader = Loader(stream)
    try:
        while loader.check_data():
            yield loader.get_data()
    finally:
        loader.dispose()

def safe_load(stream):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.
    Resolve only basic YAML tags.
    """
    return load(stream, SafeLoader)

def safe_load_all(stream):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.
    Resolve only basic YAML tags.
    """
    return load_all(stream, SafeLoader)

def emit(events, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None):
    """
    Emit YAML parsing events into a stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        from StringIO import StringIO
        stream = StringIO()
        getvalue = stream.getvalue
    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break)
    try:
        for event in events:
            dumper.emit(event)
    finally:
        dumper.dispose()
    if getvalue:
        return getvalue()

def serialize_all(nodes, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding='utf-8', explicit_start=None, explicit_end=None,
        version=None, tags=None):
    """
    Serialize a sequence of representation trees into a YAML stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        # cStringIO cannot hold unicode; fall back to StringIO when the
        # caller wants a unicode result (encoding=None).
        if encoding is None:
            from StringIO import StringIO
        else:
            from cStringIO import StringIO
        stream = StringIO()
        getvalue = stream.getvalue
    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end)
    try:
        dumper.open()
        for node in nodes:
            dumper.serialize(node)
        dumper.close()
    finally:
        dumper.dispose()
    if getvalue:
        return getvalue()

def serialize(node, stream=None, Dumper=Dumper, **kwds):
    """
    Serialize a representation tree into a YAML stream.
    If stream is None, return the produced string instead.
    """
    return serialize_all([node], stream, Dumper=Dumper, **kwds)

def dump_all(documents, stream=None, Dumper=Dumper,
        default_style=None, default_flow_style=None,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding='utf-8', explicit_start=None, explicit_end=None,
        version=None, tags=None):
    """
    Serialize a sequence of Python objects into a YAML stream.
    If stream is None, return the produced string instead.
    """
    getvalue = None
    if stream is None:
        if encoding is None:
            from StringIO import StringIO
        else:
            from cStringIO import StringIO
        stream = StringIO()
        getvalue = stream.getvalue
    dumper = Dumper(stream, default_style=default_style,
            default_flow_style=default_flow_style,
            canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end)
    try:
        dumper.open()
        for data in documents:
            dumper.represent(data)
        dumper.close()
    finally:
        dumper.dispose()
    if getvalue:
        return getvalue()

def dump(data, stream=None, Dumper=Dumper, **kwds):
    """
    Serialize a Python object into a YAML stream.
    If stream is None, return the produced string instead.
    """
    return dump_all([data], stream, Dumper=Dumper, **kwds)

def safe_dump_all(documents, stream=None, **kwds):
    """
    Serialize a sequence of Python objects into a YAML stream.
    Produce only basic YAML tags.
    If stream is None, return the produced string instead.
    """
    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)

def safe_dump(data, stream=None, **kwds):
    """
    Serialize a Python object into a YAML stream.
    Produce only basic YAML tags.
    If stream is None, return the produced string instead.
    """
    return dump_all([data], stream, Dumper=SafeDumper, **kwds)

def add_implicit_resolver(tag, regexp, first=None,
        Loader=Loader, Dumper=Dumper):
    """
    Add an implicit scalar detector.
    If an implicit scalar value matches the given regexp,
    the corresponding tag is assigned to the scalar.
    first is a sequence of possible initial characters or None.
    """
    Loader.add_implicit_resolver(tag, regexp, first)
    Dumper.add_implicit_resolver(tag, regexp, first)

def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
    """
    Add a path based resolver for the given tag.
    A path is a list of keys that forms a path
    to a node in the representation tree.
    Keys can be string values, integers, or None.
    """
    Loader.add_path_resolver(tag, path, kind)
    Dumper.add_path_resolver(tag, path, kind)

def add_constructor(tag, constructor, Loader=Loader):
    """
    Add a constructor for the given tag.
    Constructor is a function that accepts a Loader instance
    and a node object and produces the corresponding Python object.
    """
    Loader.add_constructor(tag, constructor)

def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
    """
    Add a multi-constructor for the given tag prefix.
    Multi-constructor is called for a node if its tag starts with tag_prefix.
    Multi-constructor accepts a Loader instance, a tag suffix,
    and a node object and produces the corresponding Python object.
    """
    Loader.add_multi_constructor(tag_prefix, multi_constructor)

def add_representer(data_type, representer, Dumper=Dumper):
    """
    Add a representer for the given type.
    Representer is a function accepting a Dumper instance
    and an instance of the given data type
    and producing the corresponding representation node.
    """
    Dumper.add_representer(data_type, representer)

def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
    """
    Add a representer for the given type.
    Multi-representer is a function accepting a Dumper instance
    and an instance of the given data type or subtype
    and producing the corresponding representation node.
    """
    Dumper.add_multi_representer(data_type, multi_representer)

class YAMLObjectMetaclass(type):
    """
    The metaclass for YAMLObject.
    """
    def __init__(cls, name, bases, kwds):
        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
        # Registering happens at class-definition time via yaml_tag.
        if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
            cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
            cls.yaml_dumper.add_representer(cls, cls.to_yaml)

class YAMLObject(object):
    """
    An object that can dump itself to a YAML stream
    and load itself from a YAML stream.
    """

    __metaclass__ = YAMLObjectMetaclass
    __slots__ = ()  # no direct instantiation, so allow immutable subclasses

    yaml_loader = Loader
    yaml_dumper = Dumper

    yaml_tag = None
    yaml_flow_style = None

    def from_yaml(cls, loader, node):
        """
        Convert a representation node to a Python object.
        """
        return loader.construct_yaml_object(node, cls)
    from_yaml = classmethod(from_yaml)

    def to_yaml(cls, dumper, data):
        """
        Convert a Python object to a representation node.
        """
        return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
                flow_style=cls.yaml_flow_style)
    to_yaml = classmethod(to_yaml)

diff --git a/scripts/clang-tidy/7.0.1/yaml/composer.py b/scripts/clang-tidy/7.0.1/yaml/composer.py
new file mode 100644
index 000000000..06e5ac782
--- /dev/null
+++ b/scripts/clang-tidy/7.0.1/yaml/composer.py
@@ -0,0 +1,139 @@

__all__ = ['Composer', 'ComposerError']

from error import MarkedYAMLError
from events import *
from nodes import *

class ComposerError(MarkedYAMLError):
    pass

class Composer(object):
    # Builds representation nodes from the parser's event stream.

    def __init__(self):
        # Maps anchor name -> composed node, per document.
        self.anchors = {}

    def check_node(self):
        # Drop the STREAM-START event.
        if self.check_event(StreamStartEvent):
            self.get_event()

        # If there are more documents available?
        return not self.check_event(StreamEndEvent)

    def get_node(self):
        # Get the root node of the next document.
        if not self.check_event(StreamEndEvent):
            return self.compose_document()

    def get_single_node(self):
        # Drop the STREAM-START event.
        self.get_event()

        # Compose a document if the stream is not empty.
        document = None
        if not self.check_event(StreamEndEvent):
            document = self.compose_document()

        # Ensure that the stream contains no more documents.
        if not self.check_event(StreamEndEvent):
            event = self.get_event()
            raise ComposerError("expected a single document in the stream",
                    document.start_mark, "but found another document",
                    event.start_mark)

        # Drop the STREAM-END event.
        self.get_event()

        return document

    def compose_document(self):
        # Drop the DOCUMENT-START event.
        self.get_event()

        # Compose the root node.
        node = self.compose_node(None, None)

        # Drop the DOCUMENT-END event.
        self.get_event()

        # Anchors are document-scoped; reset for the next document.
        self.anchors = {}
        return node

    def compose_node(self, parent, index):
        # Dispatch on the next event; aliases resolve to previously
        # composed nodes, anchors are recorded as we descend.
        if self.check_event(AliasEvent):
            event = self.get_event()
            anchor = event.anchor
            if anchor not in self.anchors:
                raise ComposerError(None, None, "found undefined alias %r"
                        % anchor.encode('utf-8'), event.start_mark)
            return self.anchors[anchor]
        event = self.peek_event()
        anchor = event.anchor
        if anchor is not None:
            if anchor in self.anchors:
                raise ComposerError("found duplicate anchor %r; first occurence"
                        % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
                        "second occurence", event.start_mark)
        self.descend_resolver(parent, index)
        if self.check_event(ScalarEvent):
            node = self.compose_scalar_node(anchor)
        elif self.check_event(SequenceStartEvent):
            node = self.compose_sequence_node(anchor)
        elif self.check_event(MappingStartEvent):
            node = self.compose_mapping_node(anchor)
        self.ascend_resolver()
        return node

    def compose_scalar_node(self, anchor):
        event = self.get_event()
        tag = event.tag
        # '!' (or no tag) means: resolve the tag from the scalar value.
        if tag is None or tag == u'!':
            tag = self.resolve(ScalarNode, event.value, event.implicit)
        node = ScalarNode(tag, event.value,
                event.start_mark, event.end_mark, style=event.style)
        if anchor is not None:
            self.anchors[anchor] = node
        return node

    def compose_sequence_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == u'!':
            tag = self.resolve(SequenceNode, None, start_event.implicit)
        node = SequenceNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        # Register the anchor before composing children so that aliases
        # inside the sequence can refer back to it (recursive nodes).
        if anchor is not None:
            self.anchors[anchor] = node
        index = 0
        while not self.check_event(SequenceEndEvent):
            node.value.append(self.compose_node(node, index))
            index += 1
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node

    def compose_mapping_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == u'!':
            tag = self.resolve(MappingNode, None, start_event.implicit)
        node = MappingNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        if anchor is not None:
            self.anchors[anchor] = node
        while not self.check_event(MappingEndEvent):
            #key_event = self.peek_event()
            item_key = self.compose_node(node, None)
            #if item_key in node.value:
            #    raise ComposerError("while composing a mapping", start_event.start_mark,
            #            "found duplicate key", key_event.start_mark)
            item_value = self.compose_node(node, item_key)
            #node.value[item_key] = item_value
            node.value.append((item_key, item_value))
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node

diff --git a/scripts/clang-tidy/7.0.1/yaml/constructor.py b/scripts/clang-tidy/7.0.1/yaml/constructor.py
new file mode 100644
index 000000000..635faac3e
--- /dev/null
+++ b/scripts/clang-tidy/7.0.1/yaml/constructor.py
@@ -0,0 +1,675 @@

__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
    'ConstructorError']

from error import *
from nodes import *

import datetime

import binascii, re, sys, types

class ConstructorError(MarkedYAMLError):
    pass

class BaseConstructor(object):
    # Turns representation nodes into native Python objects using the
    # per-tag constructor registries below.

    yaml_constructors = {}
    yaml_multi_constructors = {}

    def __init__(self):
        self.constructed_objects = {}
        self.recursive_objects = {}
        self.state_generators = []
        self.deep_construct = False

    def check_data(self):
        # If there are more documents available?
        return self.check_node()

    def get_data(self):
        # Construct and return the next document.
        if self.check_node():
            return self.construct_document(self.get_node())

    def get_single_data(self):
        # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + data = self.construct_object(node) + while self.state_generators: + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + raise ConstructorError(None, None, + "found unconstructable recursive node", node.start_mark) + self.recursive_objects[node] = None + constructor = None + tag_suffix = None + if node.tag in self.yaml_constructors: + constructor = self.yaml_constructors[node.tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if node.tag.startswith(tag_prefix): + tag_suffix = node.tag[len(tag_prefix):] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = node.tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = generator.next() + if self.deep_construct: + for dummy in generator: + pass + else: + self.state_generators.append(generator) + self.constructed_objects[node] = data + del 
self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_scalar(self, node): + if not isinstance(node, ScalarNode): + raise ConstructorError(None, None, + "expected a scalar node, but found %s" % node.id, + node.start_mark) + return node.value + + def construct_sequence(self, node, deep=False): + if not isinstance(node, SequenceNode): + raise ConstructorError(None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + return [self.construct_object(child, deep=deep) + for child in node.value] + + def construct_mapping(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except TypeError, exc: + raise ConstructorError("while constructing a mapping", node.start_mark, + "found unacceptable key (%s)" % exc, key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + def construct_pairs(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + def add_constructor(cls, tag, constructor): + if not 'yaml_constructors' in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + add_constructor = classmethod(add_constructor) + + def add_multi_constructor(cls, tag_prefix, multi_constructor): + if not 'yaml_multi_constructors' in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + 
cls.yaml_multi_constructors[tag_prefix] = multi_constructor + add_multi_constructor = classmethod(add_multi_constructor) + +class SafeConstructor(BaseConstructor): + + def construct_scalar(self, node): + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + + def construct_yaml_null(self, node): + self.construct_scalar(node) + return None + + bool_values = { + u'yes': True, + u'no': False, + u'true': True, + u'false': False, + u'on': True, + u'off': False, + } + + def construct_yaml_bool(self, node): + value = 
self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '') + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '0': + return 0 + elif value.startswith('0b'): + return sign*int(value[2:], 2) + elif value.startswith('0x'): + return sign*int(value[2:], 16) + elif value[0] == '0': + return sign*int(value, 8) + elif ':' in value: + digits = [int(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*int(value) + + inf_value = 1e300 + while inf_value != inf_value*inf_value: + inf_value *= inf_value + nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). + + def construct_yaml_float(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '').lower() + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '.inf': + return sign*self.inf_value + elif value == '.nan': + return self.nan_value + elif ':' in value: + digits = [float(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*float(value) + + def construct_yaml_binary(self, node): + value = self.construct_scalar(node) + try: + return str(value).decode('base64') + except (binascii.Error, UnicodeEncodeError), exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + timestamp_regexp = re.compile( + ur'''^(?P[0-9][0-9][0-9][0-9]) + -(?P[0-9][0-9]?) + -(?P[0-9][0-9]?) + (?:(?:[Tt]|[ \t]+) + (?P[0-9][0-9]?) + :(?P[0-9][0-9]) + :(?P[0-9][0-9]) + (?:\.(?P[0-9]*))? + (?:[ \t]*(?PZ|(?P[-+])(?P[0-9][0-9]?) 
+ (?::(?P[0-9][0-9]))?))?)?$''', re.X) + + def construct_yaml_timestamp(self, node): + value = self.construct_scalar(node) + match = self.timestamp_regexp.match(node.value) + values = match.groupdict() + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + if not values['hour']: + return datetime.date(year, month, day) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction = values['fraction'][:6] + while len(fraction) < 6: + fraction += '0' + fraction = int(fraction) + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + tz_minute = int(values['tz_minute'] or 0) + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data + + def construct_yaml_omap(self, node): + # Note: we do not check for duplicate keys, because it's too + # CPU-expensive. + omap = [] + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + omap.append((key, value)) + + def construct_yaml_pairs(self, node): + # Note: the same code as `construct_yaml_omap`. 
+ pairs = [] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + data = set() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + value = self.construct_scalar(node) + try: + return value.encode('ascii') + except UnicodeEncodeError: + return value + + def construct_yaml_seq(self, node): + data = [] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + data = {} + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + raise ConstructorError(None, None, + "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'), + node.start_mark) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:null', + SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:bool', + SafeConstructor.construct_yaml_bool) + 
+SafeConstructor.add_constructor( + u'tag:yaml.org,2002:int', + SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:float', + SafeConstructor.construct_yaml_float) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:binary', + SafeConstructor.construct_yaml_binary) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:omap', + SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', + SafeConstructor.construct_yaml_pairs) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:set', + SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:str', + SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:seq', + SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:map', + SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, + SafeConstructor.construct_undefined) + +class Constructor(SafeConstructor): + + def construct_python_str(self, node): + return self.construct_scalar(node).encode('utf-8') + + def construct_python_unicode(self, node): + return self.construct_scalar(node) + + def construct_python_long(self, node): + return long(self.construct_yaml_int(node)) + + def construct_python_complex(self, node): + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + return tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python module", mark, + "expected non-empty name appended to the tag", mark) + try: + __import__(name) + except ImportError, exc: + raise ConstructorError("while constructing a Python module", mark, + "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark) + return 
sys.modules[name] + + def find_python_name(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python object", mark, + "expected non-empty name appended to the tag", mark) + if u'.' in name: + module_name, object_name = name.rsplit('.', 1) + else: + module_name = '__builtin__' + object_name = name + try: + __import__(module_name) + except ImportError, exc: + raise ConstructorError("while constructing a Python object", mark, + "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark) + module = sys.modules[module_name] + if not hasattr(module, object_name): + raise ConstructorError("while constructing a Python object", mark, + "cannot find %r in the module %r" % (object_name.encode('utf-8'), + module.__name__), mark) + return getattr(module, object_name) + + def construct_python_name(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python name", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python module", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_module(suffix, node.start_mark) + + class classobj: pass + + def make_python_instance(self, suffix, node, + args=None, kwds=None, newobj=False): + if not args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if newobj and isinstance(cls, type(self.classobj)) \ + and not args and not kwds: + instance = self.classobj() + instance.__class__ = cls + return instance + elif newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, 
instance, state): + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(object, key, value) + + def construct_python_object(self, suffix, node): + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. 
+ if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} + state = {} + listitems = [] + dictitems = {} + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if state: + self.set_python_instance_state(instance, state) + if listitems: + instance.extend(listitems) + if dictitems: + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + return self.construct_python_object_apply(suffix, node, newobj=True) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/none', + Constructor.construct_yaml_null) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/bool', + Constructor.construct_yaml_bool) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/str', + Constructor.construct_python_str) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/unicode', + Constructor.construct_python_unicode) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/int', + Constructor.construct_yaml_int) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/long', + Constructor.construct_python_long) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/float', + Constructor.construct_yaml_float) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/complex', + Constructor.construct_python_complex) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/list', + Constructor.construct_yaml_seq) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/tuple', + Constructor.construct_python_tuple) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/dict', + Constructor.construct_yaml_map) + +Constructor.add_multi_constructor( + 
u'tag:yaml.org,2002:python/name:', + Constructor.construct_python_name) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/module:', + Constructor.construct_python_module) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object:', + Constructor.construct_python_object) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/apply:', + Constructor.construct_python_object_apply) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/new:', + Constructor.construct_python_object_new) + diff --git a/scripts/clang-tidy/7.0.1/yaml/cyaml.py b/scripts/clang-tidy/7.0.1/yaml/cyaml.py new file mode 100644 index 000000000..68dcd7519 --- /dev/null +++ b/scripts/clang-tidy/7.0.1/yaml/cyaml.py @@ -0,0 +1,85 @@ + +__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', + 'CBaseDumper', 'CSafeDumper', 'CDumper'] + +from _yaml import CParser, CEmitter + +from constructor import * + +from serializer import * +from representer import * + +from resolver import * + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class CSafeLoader(CParser, SafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class CLoader(CParser, Constructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + Constructor.__init__(self) + Resolver.__init__(self) + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + 
allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CDumper(CEmitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/7.0.1/yaml/dumper.py b/scripts/clang-tidy/7.0.1/yaml/dumper.py new file mode 100644 index 000000000..f811d2c91 --- /dev/null +++ b/scripts/clang-tidy/7.0.1/yaml/dumper.py @@ -0,0 +1,62 @@ + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] + +from emitter import * +from serializer import * +from representer import * +from resolver 
import * + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class Dumper(Emitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + 
version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/7.0.1/yaml/emitter.py b/scripts/clang-tidy/7.0.1/yaml/emitter.py new file mode 100644 index 000000000..e5bcdcccb --- /dev/null +++ b/scripts/clang-tidy/7.0.1/yaml/emitter.py @@ -0,0 +1,1140 @@ + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +__all__ = ['Emitter', 'EmitterError'] + +from error import YAMLError +from events import * + +class EmitterError(YAMLError): + pass + +class ScalarAnalysis(object): + def __init__(self, scalar, empty, multiline, + allow_flow_plain, allow_block_plain, + allow_single_quoted, allow_double_quoted, + allow_block): + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + +class Emitter(object): + + DEFAULT_TAG_PREFIXES = { + u'!' : u'!', + u'tag:yaml.org,2002:' : u'!!', + } + + def __init__(self, stream, canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + + # The stream should have the methods `write` and possibly `flush`. + self.stream = stream + + # Encoding can be overriden by STREAM-START. + self.encoding = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] + self.state = self.expect_stream_start + + # Current event and the event queue. + self.events = [] + self.event = None + + # The current indentation level and the stack of previous indents. 
+ self.indents = [] + self.indent = None + + # Flow level. + self.flow_level = 0 + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + + # Whether the document requires an explicit document indicator + self.open_ended = False + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + self.best_indent = 2 + if indent and 1 < indent < 10: + self.best_indent = indent + self.best_width = 80 + if width and width > self.best_indent*2: + self.best_width = width + self.best_line_break = u'\n' + if line_break in [u'\r', u'\n', u'\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None + + # Prepared anchor and tag. + self.prepared_anchor = None + self.prepared_tag = None + + # Scalar analysis and style. + self.analysis = None + self.style = None + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. 
+ + def need_more_events(self): + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return (len(self.events) < count+1) + + def increase_indent(self, flow=False, indentless=False): + self.indents.append(self.indent) + if self.indent is None: + if flow: + self.indent = self.best_indent + else: + self.indent = 0 + elif not indentless: + self.indent += self.best_indent + + # States. + + # Stream handlers. + + def expect_stream_start(self): + if isinstance(self.event, StreamStartEvent): + if self.event.encoding and not getattr(self.stream, 'encoding', None): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError("expected StreamStartEvent, but got %s" + % self.event) + + def expect_nothing(self): + raise EmitterError("expected nothing, but got %s" % self.event) + + # Document handlers. 
+ + def expect_first_document_start(self): + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = self.event.tags.keys() + handles.sort() + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = (first and not self.event.explicit and not self.canonical + and not self.event.version and not self.event.tags + and not self.check_empty_document()) + if not implicit: + self.write_indent() + self.write_indicator(u'---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError("expected DocumentStartEvent, but got %s" + % self.event) + + def expect_document_end(self): + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator(u'...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError("expected DocumentEndEvent, but got %s" + % self.event) + + def expect_document_root(self): + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. 
+ + def expect_node(self, root=False, sequence=False, mapping=False, + simple_key=False): + self.root_context = root + self.sequence_context = sequence + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + self.process_anchor(u'&') + self.process_tag() + if isinstance(self.event, ScalarEvent): + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_sequence(): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_mapping(): + self.expect_flow_mapping() + else: + self.expect_block_mapping() + else: + raise EmitterError("expected NodeEvent, but got %s" % self.event) + + def expect_alias(self): + if self.event.anchor is None: + raise EmitterError("anchor is not specified for alias") + self.process_anchor(u'*') + self.state = self.states.pop() + + def expect_scalar(self): + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
+ + def expect_flow_sequence(self): + self.write_indicator(u'[', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. 
+ + def expect_flow_mapping(self): + self.write_indicator(u'{', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(u':', True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. 
+ + def expect_block_sequence(self): + indentless = (self.mapping_context and not self.indention) + self.increase_indent(flow=False, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + if not first and isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + self.write_indicator(u'-', True, indention=True) + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. + + def expect_block_mapping(self): + self.increase_indent(flow=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + if not first and isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + if self.check_simple_key(): + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + self.write_indent() + self.write_indicator(u':', True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. 
+ + def check_empty_sequence(self): + return (isinstance(self.event, SequenceStartEvent) and self.events + and isinstance(self.events[0], SequenceEndEvent)) + + def check_empty_mapping(self): + return (isinstance(self.event, MappingStartEvent) and self.events + and isinstance(self.events[0], MappingEndEvent)) + + def check_empty_document(self): + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return (isinstance(event, ScalarEvent) and event.anchor is None + and event.tag is None and event.implicit and event.value == u'') + + def check_simple_key(self): + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ + and self.event.tag is not None: + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return (length < 128 and (isinstance(self.event, AliasEvent) + or (isinstance(self.event, ScalarEvent) + and not self.analysis.empty and not self.analysis.multiline) + or self.check_empty_sequence() or self.check_empty_mapping())) + + # Anchor, Tag, and Scalar processors. 
+ + def process_anchor(self, indicator): + if self.event.anchor is None: + self.prepared_anchor = None + return + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator+self.prepared_anchor, True) + self.prepared_anchor = None + + def process_tag(self): + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ((not self.canonical or tag is None) and + ((self.style == '' and self.event.implicit[0]) + or (self.style != '' and self.event.implicit[1]))): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = u'!' + self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError("tag is not specified") + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + self.prepared_tag = None + + def choose_scalar_style(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if not self.event.style and self.event.implicit[0]: + if (not (self.simple_key_context and + (self.analysis.empty or self.analysis.multiline)) + and (self.flow_level and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain))): + return '' + if self.event.style and self.event.style in '|>': + if (not self.flow_level and not self.simple_key_context + and self.analysis.allow_block): + return self.event.style + if not self.event.style or self.event.style == '\'': + if (self.analysis.allow_single_quoted and + not (self.simple_key_context and self.analysis.multiline)): + return '\'' + return '"' + + def process_scalar(self): + if self.analysis is None: + self.analysis 
= self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = (not self.simple_key_context) + #if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == '\'': + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + + # Analyzers. + + def prepare_version(self, version): + major, minor = version + if major != 1: + raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) + return u'%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + if not handle: + raise EmitterError("tag handle must not be empty") + if handle[0] != u'!' 
or handle[-1] != u'!': + raise EmitterError("tag handle must start and end with '!': %r" + % (handle.encode('utf-8'))) + for ch in handle[1:-1]: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the tag handle: %r" + % (ch.encode('utf-8'), handle.encode('utf-8'))) + return handle + + def prepare_tag_prefix(self, prefix): + if not prefix: + raise EmitterError("tag prefix must not be empty") + chunks = [] + start = end = 0 + if prefix[0] == u'!': + end = 1 + while end < len(prefix): + ch = prefix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?!:@&=+$,_.~*\'()[]': + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(prefix[start:end]) + return u''.join(chunks) + + def prepare_tag(self, tag): + if not tag: + raise EmitterError("tag must not be empty") + if tag == u'!': + return tag + handle = None + suffix = tag + prefixes = self.tag_prefixes.keys() + prefixes.sort() + for prefix in prefixes: + if tag.startswith(prefix) \ + and (prefix == u'!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix):] + chunks = [] + start = end = 0 + while end < len(suffix): + ch = suffix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.~*\'()[]' \ + or (ch == u'!' 
and handle != u'!'): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = u''.join(chunks) + if handle: + return u'%s%s' % (handle, suffix_text) + else: + return u'!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + if not anchor: + raise EmitterError("anchor must not be empty") + for ch in anchor: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the anchor: %r" + % (ch.encode('utf-8'), anchor.encode('utf-8'))) + return anchor + + def analyze_scalar(self, scalar): + + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, + allow_flow_plain=False, allow_block_plain=True, + allow_single_quoted=True, allow_double_quoted=True, + allow_block=False) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith(u'---') or scalar.startswith(u'...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceeded_by_whitespace = True + + # Last character or followed by a whitespace. + followed_by_whitespace = (len(scalar) == 1 or + scalar[1] in u'\0 \t\r\n\x85\u2028\u2029') + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. 
+ if ch in u'#,[]{}&*!|>\'\"%@`': + flow_indicators = True + block_indicators = True + if ch in u'?:': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in u',?[]{}': + flow_indicators = True + if ch == u':': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'#' and preceeded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in u'\n\x85\u2028\u2029': + line_breaks = True + if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): + if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': + unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. + if ch == u' ': + if index == 0: + leading_space = True + if index == len(scalar)-1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in u'\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar)-1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029') + followed_by_whitespace = (index+1 >= len(scalar) or + scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029') + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. 
+ if (leading_space or leading_break + or trailing_space or trailing_break): + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if space_break or special_characters: + allow_flow_plain = allow_block_plain = \ + allow_single_quoted = allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis(scalar=scalar, + empty=False, multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block) + + # Writers. + + def flush_stream(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # Write BOM if needed. 
+ if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write(u'\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, + whitespace=False, indention=False): + if self.whitespace or not need_whitespace: + data = indicator + else: + data = u' '+indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + indent = self.indent or 0 + if not self.indention or self.column > indent \ + or (self.column == indent and not self.whitespace): + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = u' '*(indent-self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + data = u'%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + data = u'%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. 
+ + def write_single_quoted(self, text, split=True): + self.write_indicator(u'\'', True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != u' ': + if start+1 == end and self.column > self.best_width and split \ + and start != 0 and end != len(text): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'': + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == u'\'': + data = u'\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + self.write_indicator(u'\'', False) + + ESCAPE_REPLACEMENTS = { + u'\0': u'0', + u'\x07': u'a', + u'\x08': u'b', + u'\x09': u't', + u'\x0A': u'n', + u'\x0B': u'v', + u'\x0C': u'f', + u'\x0D': u'r', + u'\x1B': u'e', + u'\"': u'\"', + u'\\': u'\\', + u'\x85': u'N', + u'\xA0': u'_', + u'\u2028': u'L', + u'\u2029': u'P', + } + + def write_double_quoted(self, text, split=True): + self.write_indicator(u'"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \ + or not (u'\x20' <= ch <= u'\x7E' + or (self.allow_unicode + and (u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD'))): + if 
start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = u'\\'+self.ESCAPE_REPLACEMENTS[ch] + elif ch <= u'\xFF': + data = u'\\x%02X' % ord(ch) + elif ch <= u'\uFFFF': + data = u'\\u%04X' % ord(ch) + else: + data = u'\\U%08X' % ord(ch) + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end+1 + if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \ + and self.column+(end-start) > self.best_width and split: + data = text[start:end]+u'\\' + if start < end: + start = end + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == u' ': + data = u'\\' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator(u'"', False) + + def determine_block_hints(self, text): + hints = u'' + if text: + if text[0] in u' \n\x85\u2028\u2029': + hints += unicode(self.best_indent) + if text[-1] not in u'\n\x85\u2028\u2029': + hints += u'-' + elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029': + hints += u'+' + return hints + + def write_folded(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'>'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if not leading_space and ch is not None and ch != u' ' \ + and text[start] == u'\n': + self.write_line_break() + leading_space = (ch == u' ') + for br in text[start:end]: + if br == u'\n': + 
self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + spaces = (ch == u' ') + end += 1 + + def write_literal(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'|'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + else: + if ch is None or ch in u'\n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + + def write_plain(self, text, split=True): + if self.root_context: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = u' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = 
text[end] + if spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + diff --git a/scripts/clang-tidy/7.0.1/yaml/error.py b/scripts/clang-tidy/7.0.1/yaml/error.py new file mode 100644 index 000000000..577686db5 --- /dev/null +++ b/scripts/clang-tidy/7.0.1/yaml/error.py @@ -0,0 +1,75 @@ + +__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] + +class Mark(object): + + def __init__(self, name, index, line, column, buffer, pointer): + self.name = name + self.index = index + self.line = line + self.column = column + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + if self.buffer is None: + return None + head = '' + start = self.pointer + while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer-start > max_length/2-1: + head = ' ... ' + start += 5 + break + tail = '' + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': + end += 1 + if end-self.pointer > max_length/2-1: + tail = ' ... 
' + end -= 5 + break + snippet = self.buffer[start:end].encode('utf-8') + return ' '*indent + head + snippet + tail + '\n' \ + + ' '*(indent+self.pointer-start+len(head)) + '^' + + def __str__(self): + snippet = self.get_snippet() + where = " in \"%s\", line %d, column %d" \ + % (self.name, self.line+1, self.column+1) + if snippet is not None: + where += ":\n"+snippet + return where + +class YAMLError(Exception): + pass + +class MarkedYAMLError(YAMLError): + + def __init__(self, context=None, context_mark=None, + problem=None, problem_mark=None, note=None): + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + + def __str__(self): + lines = [] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None \ + and (self.problem is None or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None: + lines.append(self.note) + return '\n'.join(lines) + diff --git a/scripts/clang-tidy/7.0.1/yaml/events.py b/scripts/clang-tidy/7.0.1/yaml/events.py new file mode 100644 index 000000000..f79ad389c --- /dev/null +++ b/scripts/clang-tidy/7.0.1/yaml/events.py @@ -0,0 +1,86 @@ + +# Abstract classes. 
+ +class Event(object): + def __init__(self, start_mark=None, end_mark=None): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] + if hasattr(self, key)] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +class NodeEvent(Event): + def __init__(self, anchor, start_mark=None, end_mark=None): + self.anchor = anchor + self.start_mark = start_mark + self.end_mark = end_mark + +class CollectionStartEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, + flow_style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class CollectionEndEvent(Event): + pass + +# Implementations. + +class StreamStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndEvent(Event): + pass + +class DocumentStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None, version=None, tags=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + self.version = version + self.tags = tags + +class DocumentEndEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + +class AliasEvent(NodeEvent): + pass + +class ScalarEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, value, + start_mark=None, end_mark=None, style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class SequenceStartEvent(CollectionStartEvent): + pass 
+ +class SequenceEndEvent(CollectionEndEvent): + pass + +class MappingStartEvent(CollectionStartEvent): + pass + +class MappingEndEvent(CollectionEndEvent): + pass + diff --git a/scripts/clang-tidy/7.0.1/yaml/loader.py b/scripts/clang-tidy/7.0.1/yaml/loader.py new file mode 100644 index 000000000..293ff467b --- /dev/null +++ b/scripts/clang-tidy/7.0.1/yaml/loader.py @@ -0,0 +1,40 @@ + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] + +from reader import * +from scanner import * +from parser import * +from composer import * +from constructor import * +from resolver import * + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/7.0.1/yaml/nodes.py b/scripts/clang-tidy/7.0.1/yaml/nodes.py new file mode 100644 index 000000000..c4f070c41 --- /dev/null +++ b/scripts/clang-tidy/7.0.1/yaml/nodes.py @@ -0,0 +1,49 @@ + +class Node(object): + def __init__(self, tag, value, start_mark, end_mark): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + value = self.value + #if isinstance(value, list): + # if len(value) == 0: + # value = '' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = 
'<%d items>' % len(value) + #else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... ') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + +class ScalarNode(Node): + id = 'scalar' + def __init__(self, tag, value, + start_mark=None, end_mark=None, style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class CollectionNode(Node): + def __init__(self, tag, value, + start_mark=None, end_mark=None, flow_style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class SequenceNode(CollectionNode): + id = 'sequence' + +class MappingNode(CollectionNode): + id = 'mapping' + diff --git a/scripts/clang-tidy/7.0.1/yaml/parser.py b/scripts/clang-tidy/7.0.1/yaml/parser.py new file mode 100644 index 000000000..f9e3057f3 --- /dev/null +++ b/scripts/clang-tidy/7.0.1/yaml/parser.py @@ -0,0 +1,589 @@ + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? 
+# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# +# FIRST sets: +# +# stream: { STREAM-START } +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR 
TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } + +__all__ = ['Parser', 'ParserError'] + +from error import MarkedYAMLError +from tokens import * +from events import * +from scanner import * + +class ParserError(MarkedYAMLError): + pass + +class Parser(object): + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + + DEFAULT_TAGS = { + u'!': u'!', + u'!!': u'tag:yaml.org,2002:', + } + + def __init__(self): + self.current_event = None + self.yaml_version = None + self.tag_handles = {} + self.states = [] + self.marks = [] + self.state = self.parse_stream_start + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def check_event(self, *choices): + # Check the type of the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # Get the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + return self.current_event + + def get_event(self): + # Get the next event and proceed further. + if self.current_event is None: + if self.state: + self.current_event = self.state() + value = self.current_event + self.current_event = None + return value + + # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END + # implicit_document ::= block_node DOCUMENT-END* + # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* + + def parse_stream_start(self): + + # Parse the stream start. + token = self.get_token() + event = StreamStartEvent(token.start_mark, token.end_mark, + encoding=token.encoding) + + # Prepare the next state. 
+ self.state = self.parse_implicit_document_start + + return event + + def parse_implicit_document_start(self): + + # Parse an implicit document. + if not self.check_token(DirectiveToken, DocumentStartToken, + StreamEndToken): + self.tag_handles = self.DEFAULT_TAGS + token = self.peek_token() + start_mark = end_mark = token.start_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=False) + + # Prepare the next state. + self.states.append(self.parse_document_end) + self.state = self.parse_block_node + + return event + + else: + return self.parse_document_start() + + def parse_document_start(self): + + # Parse any extra document end indicators. + while self.check_token(DocumentEndToken): + self.get_token() + + # Parse an explicit document. + if not self.check_token(StreamEndToken): + token = self.peek_token() + start_mark = token.start_mark + version, tags = self.process_directives() + if not self.check_token(DocumentStartToken): + raise ParserError(None, None, + "expected '', but found %r" + % self.peek_token().id, + self.peek_token().start_mark) + token = self.get_token() + end_mark = token.end_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=True, version=version, tags=tags) + self.states.append(self.parse_document_end) + self.state = self.parse_document_content + else: + # Parse the end of the stream. + token = self.get_token() + event = StreamEndEvent(token.start_mark, token.end_mark) + assert not self.states + assert not self.marks + self.state = None + return event + + def parse_document_end(self): + + # Parse the document end. + token = self.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.check_token(DocumentEndToken): + token = self.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, + explicit=explicit) + + # Prepare the next state. 
+ self.state = self.parse_document_start + + return event + + def parse_document_content(self): + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + event = self.process_empty_scalar(self.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == u'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == u'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle.encode('utf-8'), + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? 
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + if self.check_token(AliasToken): + token = self.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) + self.state = self.states.pop() + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle.encode('utf-8'), + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == u'!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.peek_token().start_mark + event = None + implicit = (tag is None or tag == u'!') + if indentless_sequence and 
self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + self.state = self.parse_indentless_sequence_entry + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == u'!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + self.state = self.states.pop() + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_sequence_first_entry + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_mapping_first_key + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_sequence_first_entry + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_mapping_first_key + elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. 
+ event = ScalarEvent(anchor, tag, (implicit, False), u'', + start_mark, end_mark) + self.state = self.states.pop() + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.peek_token() + raise ParserError("while parsing a %s node" % node, start_mark, + "expected the node content, but found %r" % token.id, + token.start_mark) + return event + + # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END + + def parse_block_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_sequence_entry() + + def parse_block_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, BlockEndToken): + self.states.append(self.parse_block_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_block_sequence_entry + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block collection", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ + + def parse_indentless_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, + KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_indentless_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_indentless_sequence_entry + return self.process_empty_scalar(token.end_mark) + token = self.peek_token() + event = SequenceEndEvent(token.start_mark, token.start_mark) + self.state = self.states.pop() + return event + + # block_mapping ::= BLOCK-MAPPING_START + # ((KEY 
block_node_or_indentless_sequence?)? + # (VALUE block_node_or_indentless_sequence?)?)* + # BLOCK-END + + def parse_block_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_mapping_key() + + def parse_block_mapping_key(self): + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_value) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block mapping", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_block_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_key) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_block_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + # flow_sequence ::= FLOW-SEQUENCE-START + # (flow_sequence_entry FLOW-ENTRY)* + # flow_sequence_entry? + # FLOW-SEQUENCE-END + # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + # + # Note that while production rules for both flow_sequence_entry and + # flow_mapping_entry are equal, their interpretations are different. + # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` + # generate an inline mapping (set syntax). 
+ + def parse_flow_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + if not self.check_token(FlowSequenceEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow sequence", self.marks[-1], + "expected ',' or ']', but got %r" % token.id, token.start_mark) + + if self.check_token(KeyToken): + token = self.peek_token() + event = MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_sequence_entry_mapping_end(self): + self.state = self.parse_flow_sequence_entry + token = self.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + + def parse_flow_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_mapping_key(first=True) + + def parse_flow_mapping_key(self, first=False): + if not self.check_token(FlowMappingEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow mapping", self.marks[-1], + "expected ',' or '}', but got %r" % token.id, token.start_mark) + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(token.end_mark) + elif not self.check_token(FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_empty_value) + return self.parse_flow_node() + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_key) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_mapping_empty_value(self): + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(self.peek_token().start_mark) + + def process_empty_scalar(self, mark): + return ScalarEvent(None, None, (True, False), u'', mark, mark) + diff --git a/scripts/clang-tidy/7.0.1/yaml/reader.py b/scripts/clang-tidy/7.0.1/yaml/reader.py new file mode 100644 index 000000000..3249e6b9f --- /dev/null +++ b/scripts/clang-tidy/7.0.1/yaml/reader.py @@ -0,0 +1,190 @@ +# This module contains abstractions for the input stream. You don't have to +# looks further, there are no pretty code. +# +# We define two classes here. +# +# Mark(source, line, column) +# It's just a record and its only use is producing nice error messages. +# Parser does not use it for any other purposes. +# +# Reader(source, data) +# Reader determines the encoding of `data` and converts it to unicode. +# Reader provides the following methods and attributes: +# reader.peek(length=1) - return the next `length` characters +# reader.forward(length=1) - move the current position to `length` characters. +# reader.index - the number of the current character. +# reader.line, stream.column - the line and the column of the current character. 
+ +__all__ = ['Reader', 'ReaderError'] + +from error import YAMLError, Mark + +import codecs, re + +class ReaderError(YAMLError): + + def __init__(self, name, position, character, encoding, reason): + self.name = name + self.character = character + self.position = position + self.encoding = encoding + self.reason = reason + + def __str__(self): + if isinstance(self.character, str): + return "'%s' codec can't decode byte #x%02x: %s\n" \ + " in \"%s\", position %d" \ + % (self.encoding, ord(self.character), self.reason, + self.name, self.position) + else: + return "unacceptable character #x%04x: %s\n" \ + " in \"%s\", position %d" \ + % (self.character, self.reason, + self.name, self.position) + +class Reader(object): + # Reader: + # - determines the data encoding and converts it to unicode, + # - checks if characters are in allowed range, + # - adds '\0' to the end. + + # Reader accepts + # - a `str` object, + # - a `unicode` object, + # - a file-like object with its `read` method returning `str`, + # - a file-like object with its `read` method returning `unicode`. + + # Yeah, it's ugly and slow. 
+ + def __init__(self, stream): + self.name = None + self.stream = None + self.stream_pointer = 0 + self.eof = True + self.buffer = u'' + self.pointer = 0 + self.raw_buffer = None + self.raw_decode = None + self.encoding = None + self.index = 0 + self.line = 0 + self.column = 0 + if isinstance(stream, unicode): + self.name = "" + self.check_printable(stream) + self.buffer = stream+u'\0' + elif isinstance(stream, str): + self.name = "" + self.raw_buffer = stream + self.determine_encoding() + else: + self.stream = stream + self.name = getattr(stream, 'name', "") + self.eof = False + self.raw_buffer = '' + self.determine_encoding() + + def peek(self, index=0): + try: + return self.buffer[self.pointer+index] + except IndexError: + self.update(index+1) + return self.buffer[self.pointer+index] + + def prefix(self, length=1): + if self.pointer+length >= len(self.buffer): + self.update(length) + return self.buffer[self.pointer:self.pointer+length] + + def forward(self, length=1): + if self.pointer+length+1 >= len(self.buffer): + self.update(length+1) + while length: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch in u'\n\x85\u2028\u2029' \ + or (ch == u'\r' and self.buffer[self.pointer] != u'\n'): + self.line += 1 + self.column = 0 + elif ch != u'\uFEFF': + self.column += 1 + length -= 1 + + def get_mark(self): + if self.stream is None: + return Mark(self.name, self.index, self.line, self.column, + self.buffer, self.pointer) + else: + return Mark(self.name, self.index, self.line, self.column, + None, None) + + def determine_encoding(self): + while not self.eof and len(self.raw_buffer) < 2: + self.update_raw() + if not isinstance(self.raw_buffer, unicode): + if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): + self.raw_decode = codecs.utf_16_le_decode + self.encoding = 'utf-16-le' + elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): + self.raw_decode = codecs.utf_16_be_decode + self.encoding = 'utf-16-be' + else: + self.raw_decode = 
codecs.utf_8_decode + self.encoding = 'utf-8' + self.update(1) + + NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') + def check_printable(self, data): + match = self.NON_PRINTABLE.search(data) + if match: + character = match.group() + position = self.index+(len(self.buffer)-self.pointer)+match.start() + raise ReaderError(self.name, position, ord(character), + 'unicode', "special characters are not allowed") + + def update(self, length): + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer:] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, + 'strict', self.eof) + except UnicodeDecodeError, exc: + character = exc.object[exc.start] + if self.stream is not None: + position = self.stream_pointer-len(self.raw_buffer)+exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, + exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += u'\0' + self.raw_buffer = None + break + + def update_raw(self, size=1024): + data = self.stream.read(size) + if data: + self.raw_buffer += data + self.stream_pointer += len(data) + else: + self.eof = True + +#try: +# import psyco +# psyco.bind(Reader) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/7.0.1/yaml/representer.py b/scripts/clang-tidy/7.0.1/yaml/representer.py new file mode 100644 index 000000000..4ea8cb1fe --- /dev/null +++ b/scripts/clang-tidy/7.0.1/yaml/representer.py @@ -0,0 +1,486 @@ + +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError'] + +from error import * +from nodes import * + +import datetime + +import sys, copy_reg, types + +class RepresenterError(YAMLError): + pass + +class 
BaseRepresenter(object): + + yaml_representers = {} + yaml_multi_representers = {} + + def __init__(self, default_style=None, default_flow_style=None): + self.default_style = default_style + self.default_flow_style = default_flow_style + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent(self, data): + node = self.represent_data(data) + self.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def get_classobj_bases(self, cls): + bases = [cls] + for base in cls.__bases__: + bases.extend(self.get_classobj_bases(base)) + return bases + + def represent_data(self, data): + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + #if node is None: + # raise RepresenterError("recursive objects are not allowed: %r" % data) + return node + #self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if type(data) is types.InstanceType: + data_types = self.get_classobj_bases(data.__class__)+list(data_types) + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, unicode(data)) + #if alias_key is not None: + # self.represented_objects[alias_key] = node + return node + + def add_representer(cls, data_type, representer): + if not 'yaml_representers' in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + 
cls.yaml_representers[data_type] = representer + add_representer = classmethod(add_representer) + + def add_multi_representer(cls, data_type, representer): + if not 'yaml_multi_representers' in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + add_multi_representer = classmethod(add_multi_representer) + + def represent_scalar(self, tag, value, style=None): + if style is None: + style = self.default_style + node = ScalarNode(tag, value, style=style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + value = [] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + value = [] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = mapping.items() + mapping.sort() + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + 
else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + return False + +class SafeRepresenter(BaseRepresenter): + + def ignore_aliases(self, data): + if data is None: + return True + if isinstance(data, tuple) and data == (): + return True + if isinstance(data, (str, unicode, bool, int, float)): + return True + + def represent_none(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:null', + u'null') + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_bool(self, data): + if data: + value = u'true' + else: + value = u'false' + return self.represent_scalar(u'tag:yaml.org,2002:bool', value) + + def represent_int(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + def represent_long(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value*inf_value): + inf_value *= inf_value + + def represent_float(self, data): + if data != data or (data == 0.0 and data == 1.0): + value = u'.nan' + elif data == self.inf_value: + value = u'.inf' + elif data == -self.inf_value: + value = u'-.inf' + else: + value = unicode(repr(data)).lower() + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag. We fix this by adding + # '.0' before the 'e' symbol. + if u'.' 
not in value and u'e' in value: + value = value.replace(u'e', u'.0e', 1) + return self.represent_scalar(u'tag:yaml.org,2002:float', value) + + def represent_list(self, data): + #pairs = (len(data) > 0 and isinstance(data, list)) + #if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + #if not pairs: + return self.represent_sequence(u'tag:yaml.org,2002:seq', data) + #value = [] + #for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + #return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + return self.represent_mapping(u'tag:yaml.org,2002:map', data) + + def represent_set(self, data): + value = {} + for key in data: + value[key] = None + return self.represent_mapping(u'tag:yaml.org,2002:set', value) + + def represent_date(self, data): + value = unicode(data.isoformat()) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + value = unicode(data.isoformat(' ')) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + raise RepresenterError("cannot represent an object: %s" % data) + +SafeRepresenter.add_representer(type(None), + SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, + SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(unicode, + SafeRepresenter.represent_unicode) + +SafeRepresenter.add_representer(bool, + SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, + SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(long, + SafeRepresenter.represent_long) + 
+SafeRepresenter.add_representer(float, + SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, + SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, + SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, + SafeRepresenter.represent_undefined) + +class Representer(SafeRepresenter): + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:python/str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + tag = None + try: + data.encode('ascii') + tag = u'tag:yaml.org,2002:python/unicode' + except UnicodeEncodeError: + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data) + + def represent_long(self, data): + tag = u'tag:yaml.org,2002:int' + if int(data) is not data: + tag = u'tag:yaml.org,2002:python/long' + return self.represent_scalar(tag, unicode(data)) + + def represent_complex(self, data): + if data.imag == 0.0: + data = u'%r' % data.real + elif data.real == 0.0: + data = u'%rj' % data.imag + elif data.imag > 0: + data = u'%r+%rj' % (data.real, data.imag) + else: + data = u'%r%rj' % (data.real, data.imag) + return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) + + def represent_tuple(self, data): + return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + name = u'%s.%s' % 
(data.__module__, data.__name__) + return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'') + + def represent_module(self, data): + return self.represent_scalar( + u'tag:yaml.org,2002:python/module:'+data.__name__, u'') + + def represent_instance(self, data): + # For instances of classic classes, we use __getinitargs__ and + # __getstate__ to serialize the data. + + # If data.__getinitargs__ exists, the object must be reconstructed by + # calling cls(**args), where args is a tuple returned by + # __getinitargs__. Otherwise, the cls.__init__ method should never be + # called and the class instance is created by instantiating a trivial + # class and assigning to the instance's __class__ variable. + + # If data.__getstate__ exists, it returns the state of the object. + # Otherwise, the state of the object is data.__dict__. + + # We produce either a !!python/object or !!python/object/new node. + # If data.__getinitargs__ does not exist and state is a dictionary, we + # produce a !!python/object node . Otherwise we produce a + # !!python/object/new node. + + cls = data.__class__ + class_name = u'%s.%s' % (cls.__module__, cls.__name__) + args = None + state = None + if hasattr(data, '__getinitargs__'): + args = list(data.__getinitargs__()) + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__ + if args is None and isinstance(state, dict): + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+class_name, state) + if isinstance(state, dict) and not state: + return self.represent_sequence( + u'tag:yaml.org,2002:python/object/new:'+class_name, args) + value = {} + if args: + value['args'] = args + value['state'] = state + return self.represent_mapping( + u'tag:yaml.org,2002:python/object/new:'+class_name, value) + + def represent_object(self, data): + # We use __reduce__ API to save the data. 
data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. + + cls = type(data) + if cls in copy_reg.dispatch_table: + reduce = copy_reg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError("cannot represent object: %r" % data) + reduce = (list(reduce)+[None]*5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = u'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = u'tag:yaml.org,2002:python/object/apply:' + newobj = False + function_name = u'%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems \ + and isinstance(state, dict) and newobj: + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+function_name, state) + if not listitems and not dictitems \ + and isinstance(state, dict) and not state: + return self.represent_sequence(tag+function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag+function_name, value) 
+ +Representer.add_representer(str, + Representer.represent_str) + +Representer.add_representer(unicode, + Representer.represent_unicode) + +Representer.add_representer(long, + Representer.represent_long) + +Representer.add_representer(complex, + Representer.represent_complex) + +Representer.add_representer(tuple, + Representer.represent_tuple) + +Representer.add_representer(type, + Representer.represent_name) + +Representer.add_representer(types.ClassType, + Representer.represent_name) + +Representer.add_representer(types.FunctionType, + Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, + Representer.represent_name) + +Representer.add_representer(types.ModuleType, + Representer.represent_module) + +Representer.add_multi_representer(types.InstanceType, + Representer.represent_instance) + +Representer.add_multi_representer(object, + Representer.represent_object) + diff --git a/scripts/clang-tidy/7.0.1/yaml/resolver.py b/scripts/clang-tidy/7.0.1/yaml/resolver.py new file mode 100644 index 000000000..528fbc0ea --- /dev/null +++ b/scripts/clang-tidy/7.0.1/yaml/resolver.py @@ -0,0 +1,227 @@ + +__all__ = ['BaseResolver', 'Resolver'] + +from error import * +from nodes import * + +import re + +class ResolverError(YAMLError): + pass + +class BaseResolver(object): + + DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} + yaml_path_resolvers = {} + + def __init__(self): + self.resolver_exact_paths = [] + self.resolver_prefix_paths = [] + + def add_implicit_resolver(cls, tag, regexp, first): + if not 'yaml_implicit_resolvers' in cls.__dict__: + implicit_resolvers = {} + for key in cls.yaml_implicit_resolvers: + implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:] + cls.yaml_implicit_resolvers = implicit_resolvers + if first is None: + first = [None] + for ch in first: + 
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + add_implicit_resolver = classmethod(add_implicit_resolver) + + def add_path_resolver(cls, tag, path, kind=None): + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. 
+ if not 'yaml_path_resolvers' in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError("Invalid path element: %s" % element) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ + and not isinstance(node_check, basestring) \ + and node_check is not None: + raise ResolverError("Invalid node checker: %s" % node_check) + if not isinstance(index_check, (basestring, int)) \ + and index_check is not None: + raise ResolverError("Invalid index checker: %s" % index_check) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] \ + and kind is not None: + raise ResolverError("Invalid node kind: %s" % kind) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + add_path_resolver = classmethod(add_path_resolver) + + def descend_resolver(self, current_node, current_index): + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, + current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + 
prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, + current_node, current_index): + node_check, index_check = path[depth-1] + if isinstance(node_check, basestring): + if current_node.tag != node_check: + return + elif node_check is not None: + if not isinstance(current_node, node_check): + return + if index_check is True and current_index is not None: + return + if (index_check is False or index_check is None) \ + and current_index is None: + return + if isinstance(index_check, basestring): + if not (isinstance(current_index, ScalarNode) + and index_check == current_index.value): + return + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return + return True + + def resolve(self, kind, value, implicit): + if kind is ScalarNode and implicit[0]: + if value == u'': + resolvers = self.yaml_implicit_resolvers.get(u'', []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if self.yaml_path_resolvers: + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + +class Resolver(BaseResolver): + pass + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:bool', + re.compile(ur'''^(?:yes|Yes|YES|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + 
list(u'yYnNtTfFoO')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:float', + re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? + |\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:int', + re.compile(ur'''^(?:[-+]?0b[0-1_]+ + |[-+]?0[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), + list(u'-+0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:merge', + re.compile(ur'^(?:<<)$'), + [u'<']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:null', + re.compile(ur'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + [u'~', u'n', u'N', u'']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:timestamp', + re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? + (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list(u'0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:value', + re.compile(ur'^(?:=)$'), + [u'=']) + +# The following resolver is only for documentation purposes. It cannot work +# because plain scalars cannot start with '!', '&', or '*'. 
+Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:yaml', + re.compile(ur'^(?:!|&|\*)$'), + list(u'!&*')) + diff --git a/scripts/clang-tidy/7.0.1/yaml/scanner.py b/scripts/clang-tidy/7.0.1/yaml/scanner.py new file mode 100644 index 000000000..834f662a4 --- /dev/null +++ b/scripts/clang-tidy/7.0.1/yaml/scanner.py @@ -0,0 +1,1453 @@ + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# Read comments in the Scanner code for more details. +# + +__all__ = ['Scanner', 'ScannerError'] + +from error import MarkedYAMLError +from tokens import * + +class ScannerError(MarkedYAMLError): + pass + +class SimpleKey(object): + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + +class Scanner(object): + + def __init__(self): + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer. + + # Had we reached the end of the stream? + self.done = False + + # The number of unclosed '{' and '['. `flow_level == 0` means block + # context. + self.flow_level = 0 + + # List of processed tokens that are not yet emitted. 
+ self.tokens = [] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. + self.possible_simple_keys = {} + + # Public methods. + + def check_token(self, *choices): + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # Return the next token, but do not delete if from the queue. 
+ while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + return self.tokens[0] + + def get_token(self): + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. + + def need_more_tokens(self): + if self.done: + return False + if not self.tokens: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + + def fetch_more_tokens(self): + + # Eat whitespaces and comments until we reach the next token. + self.scan_to_next_token() + + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.column) + + # Peek the next character. + ch = self.peek() + + # Is it the end of stream? + if ch == u'\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == u'%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == u'-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == u'.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + #if ch == u'\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == u'[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == u'{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == u']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? 
+ if ch == u'}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == u',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == u'-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == u'?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == u':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == u'*': + return self.fetch_alias() + + # Is it an anchor? + if ch == u'&': + return self.fetch_anchor() + + # Is it a tag? + if ch == u'!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == u'|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == u'>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == u'\'': + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == u'\"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError("while scanning for the next token", None, + "found character %r that cannot start any token" + % ch.encode('utf-8'), self.get_mark()) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. 
We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). + for level in self.possible_simple_keys.keys(): + key = self.possible_simple_keys[level] + if key.line != self.line \ + or self.index-key.index > 1024: + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # The next token may start a simple key. We check if it's possible + # and save its position. This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.column + + # The next token might be a simple key. Let's save it's number and + # position. + if self.allow_simple_key: + self.remove_possible_simple_key() + token_number = self.tokens_taken+len(self.tokens) + key = SimpleKey(token_number, required, + self.index, self.line, self.column, self.get_mark()) + self.possible_simple_keys[self.flow_level] = key + + def remove_possible_simple_key(self): + # Remove the saved possible key position at the current flow level. 
+ if self.flow_level in self.possible_simple_keys: + key = self.possible_simple_keys[self.flow_level] + + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + + del self.possible_simple_keys[self.flow_level] + + # Indentation functions. + + def unwind_indent(self, column): + + ## In flow context, tokens should respect indentation. + ## Actually the condition should be `self.indent >= column` according to + ## the spec. But this condition will prohibit intuitively correct + ## constructions such as + ## key : { + ## } + #if self.flow_level and self.indent > column: + # raise ScannerError(None, None, + # "invalid intendation or unclosed '[' or '{'", + # self.get_mark()) + + # In the flow context, indentation is ignored. We make the scanner less + # restrictive then specification requires. + if self.flow_level: + return + + # In block context, we may need to issue the BLOCK-END tokens. + while self.indent > column: + mark = self.get_mark() + self.indent = self.indents.pop() + self.tokens.append(BlockEndToken(mark, mark)) + + def add_indent(self, column): + # Check if we need to increase indentation. + if self.indent < column: + self.indents.append(self.indent) + self.indent = column + return True + return False + + # Fetchers. + + def fetch_stream_start(self): + # We always add STREAM-START as the first token and STREAM-END as the + # last token. + + # Read the token. + mark = self.get_mark() + + # Add STREAM-START. + self.tokens.append(StreamStartToken(mark, mark, + encoding=self.encoding)) + + + def fetch_stream_end(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + self.possible_simple_keys = {} + + # Read the token. + mark = self.get_mark() + + # Add STREAM-END. + self.tokens.append(StreamEndToken(mark, mark)) + + # The steam is finished. 
+ self.done = True + + def fetch_directive(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Scan and add DIRECTIVE. + self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END. + start_mark = self.get_mark() + self.forward(3) + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_start(self): + self.fetch_flow_collection_start(FlowSequenceStartToken) + + def fetch_flow_mapping_start(self): + self.fetch_flow_collection_start(FlowMappingStartToken) + + def fetch_flow_collection_start(self, TokenClass): + + # '[' and '{' may start a simple key. + self.save_possible_simple_key() + + # Increase the flow level. + self.flow_level += 1 + + # Simple keys are allowed after '[' and '{'. + self.allow_simple_key = True + + # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_end(self): + self.fetch_flow_collection_end(FlowSequenceEndToken) + + def fetch_flow_mapping_end(self): + self.fetch_flow_collection_end(FlowMappingEndToken) + + def fetch_flow_collection_end(self, TokenClass): + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Decrease the flow level. + self.flow_level -= 1 + + # No simple keys after ']' or '}'. 
+ self.allow_simple_key = False + + # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_entry(self): + + # Simple keys are allowed after ','. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add FLOW-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError(None, None, + "sequence entries are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-SEQUENCE-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + + # Simple keys are allowed after '-'. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not nessesary a simple)? + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping keys are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-MAPPING-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. 
+ self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert(key.token_number-self.tokens_taken, + KeyToken(key.mark, key.mark)) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert(key.token_number-self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark)) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be catched by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping values are not allowed here", + self.get_mark()) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. 
+ start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + + # ALIAS could be a simple key. + self.save_possible_simple_key() + + # No simple keys after ALIAS. + self.allow_simple_key = False + + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + + # ANCHOR could start a simple key. + self.save_possible_simple_key() + + # No simple keys after ANCHOR. + self.allow_simple_key = False + + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + + # TAG could start a simple key. + self.save_possible_simple_key() + + # No simple keys after TAG. + self.allow_simple_key = False + + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + + # A simple key may follow a block scalar. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + self.fetch_flow_scalar(style='\'') + + def fetch_double(self): + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + + # A flow scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after flow scalars. + self.allow_simple_key = False + + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + + # A plain scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + + # Scan and add SCALAR. 
May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.column == 0: + return True + + def check_document_start(self): + + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'---' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_document_end(self): + + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'...' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_block_entry(self): + + # BLOCK-ENTRY: '-' (' '|'\n') + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_key(self): + + # KEY(flow context): '?' + if self.flow_level: + return True + + # KEY(block context): '?' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_value(self): + + # VALUE(flow context): ':' + if self.flow_level: + return True + + # VALUE(block context): ':' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_plain(self): + + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. + # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + ch = self.peek() + return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ + or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029' + and (ch == u'-' or (not self.flow_level and ch in u'?:'))) + + # Scanners. + + def scan_to_next_token(self): + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. 
+ # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + + if self.index == 0 and self.peek() == u'\uFEFF': + self.forward() + found = False + while not found: + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + + def scan_directive(self): + # See the specification for details. + start_mark = self.get_mark() + self.forward() + name = self.scan_directive_name(start_mark) + value = None + if name == u'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.get_mark() + elif name == u'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.get_mark() + else: + end_mark = self.get_mark() + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # See the specification for details. 
+ length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return value + + def scan_yaml_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + major = self.scan_yaml_directive_number(start_mark) + if self.peek() != '.': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or '.', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + minor = self.scan_yaml_directive_number(start_mark) + if self.peek() not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or ' ', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + return (major, minor) + + def scan_yaml_directive_number(self, start_mark): + # See the specification for details. + ch = self.peek() + if not (u'0' <= ch <= u'9'): + raise ScannerError("while scanning a directive", start_mark, + "expected a digit, but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 0 + while u'0' <= self.peek(length) <= u'9': + length += 1 + value = int(self.prefix(length)) + self.forward(length) + return value + + def scan_tag_directive_value(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + handle = self.scan_tag_directive_handle(start_mark) + while self.peek() == u' ': + self.forward() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.peek() + if ch != u' ': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_tag_directive_prefix(self, start_mark): + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_directive_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpteted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. 
+ start_mark = self.get_mark() + indicator = self.peek() + if indicator == u'*': + name = 'alias' + else: + name = 'anchor' + self.forward() + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`': + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + end_mark = self.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # See the specification for details. + start_mark = self.get_mark() + ch = self.peek(1) + if ch == u'<': + handle = None + self.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if self.peek() != u'>': + raise ScannerError("while parsing a tag", start_mark, + "expected '>', but found %r" % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + elif ch in u'\0 \t\r\n\x85\u2028\u2029': + handle = None + suffix = u'!' + self.forward() + else: + length = 1 + use_handle = False + while ch not in u'\0 \r\n\x85\u2028\u2029': + if ch == u'!': + use_handle = True + break + length += 1 + ch = self.peek(length) + handle = u'!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = u'!' 
+ self.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a tag", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + value = (handle, suffix) + end_mark = self.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style): + # See the specification for details. + + if style == '>': + folded = True + else: + folded = False + + chunks = [] + start_mark = self.get_mark() + + # Scan the header. + self.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. + min_indent = self.indent+1 + if min_indent < 1: + min_indent = 1 + if increment is None: + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + indent = min_indent+increment-1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = u'' + + # Scan the inner part of the block scalar. + while self.column == indent and self.peek() != u'\0': + chunks.extend(breaks) + leading_non_space = self.peek() not in u' \t' + length = 0 + while self.peek(length) not in u'\0\r\n\x85\u2028\u2029': + length += 1 + chunks.append(self.prefix(length)) + self.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if self.column == indent and self.peek() != u'\0': + + # Unfortunately, folding rules are ambiguous. 
+ # + # This is the folding according to the specification: + + if folded and line_break == u'\n' \ + and leading_non_space and self.peek() not in u' \t': + if not breaks: + chunks.append(u' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + #if folded and line_break == u'\n': + # if not breaks: + # if self.peek() not in ' \t': + # chunks.append(u' ') + # else: + # chunks.append(line_break) + #else: + # chunks.append(line_break) + else: + break + + # Chomp the tail. + if chomping is not False: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + + # We are done. + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + def scan_block_scalar_indicators(self, start_mark): + # See the specification for details. + chomping = None + increment = None + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + elif ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected chomping or indentation indicators, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_block_scalar_indentation(self): + # See the specification for details. + chunks = [] + max_indent = 0 + end_mark = self.get_mark() + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() != u' ': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + else: + self.forward() + if self.column > max_indent: + max_indent = self.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # See the specification for details. + chunks = [] + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + while self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + return chunks, end_mark + + def scan_flow_scalar(self, style): + # See the specification for details. + # Note that we loose indentation rules for quoted scalars. Quoted + # scalars don't need to adhere indentation because " and ' clearly + # mark the beginning and the end of them. Therefore we are less + # restrictive then the specification requires. We only need to check + # that document separators are not included in scalars. 
+ if style == '"': + double = True + else: + double = False + chunks = [] + start_mark = self.get_mark() + quote = self.peek() + self.forward() + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + while self.peek() != quote: + chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + self.forward() + end_mark = self.get_mark() + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + ESCAPE_REPLACEMENTS = { + u'0': u'\0', + u'a': u'\x07', + u'b': u'\x08', + u't': u'\x09', + u'\t': u'\x09', + u'n': u'\x0A', + u'v': u'\x0B', + u'f': u'\x0C', + u'r': u'\x0D', + u'e': u'\x1B', + u' ': u'\x20', + u'\"': u'\"', + u'\\': u'\\', + u'N': u'\x85', + u'_': u'\xA0', + u'L': u'\u2028', + u'P': u'\u2029', + } + + ESCAPE_CODES = { + u'x': 2, + u'u': 4, + u'U': 8, + } + + def scan_flow_scalar_non_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + length = 0 + while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029': + length += 1 + if length: + chunks.append(self.prefix(length)) + self.forward(length) + ch = self.peek() + if not double and ch == u'\'' and self.peek(1) == u'\'': + chunks.append(u'\'') + self.forward(2) + elif (double and ch == u'\'') or (not double and ch in u'\"\\'): + chunks.append(ch) + self.forward() + elif double and ch == u'\\': + self.forward() + ch = self.peek() + if ch in self.ESCAPE_REPLACEMENTS: + chunks.append(self.ESCAPE_REPLACEMENTS[ch]) + self.forward() + elif ch in self.ESCAPE_CODES: + length = self.ESCAPE_CODES[ch] + self.forward() + for k in range(length): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "expected escape sequence of %d hexdecimal numbers, but found %r" % + (length, self.peek(k).encode('utf-8')), self.get_mark()) + code = int(self.prefix(length), 16) + 
chunks.append(unichr(code)) + self.forward(length) + elif ch in u'\r\n\x85\u2028\u2029': + self.scan_line_break() + chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) + else: + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark()) + else: + return chunks + + def scan_flow_scalar_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + length = 0 + while self.peek(length) in u' \t': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch == u'\0': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected end of stream", self.get_mark()) + elif ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + breaks = self.scan_flow_scalar_breaks(double, start_mark) + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + else: + chunks.append(whitespaces) + return chunks + + def scan_flow_scalar_breaks(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + # Instead of checking indentation, we check for document + # separators. + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected document separator", self.get_mark()) + while self.peek() in u' \t': + self.forward() + if self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + else: + return chunks + + def scan_plain(self): + # See the specification for details. + # We add an additional restriction for the flow context: + # plain scalars in the flow context cannot contain ',', ':' and '?'. + # We also keep track of the `allow_simple_key` flag here. + # Indentation rules are loosed for the flow context. 
+ chunks = [] + start_mark = self.get_mark() + end_mark = start_mark + indent = self.indent+1 + # We allow zero indentation for scalars, but then we need to check for + # document separators at the beginning of the line. + #if indent == 0: + # indent = 1 + spaces = [] + while True: + length = 0 + if self.peek() == u'#': + break + while True: + ch = self.peek(length) + if ch in u'\0 \t\r\n\x85\u2028\u2029' \ + or (not self.flow_level and ch == u':' and + self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \ + or (self.flow_level and ch in u',:?[]{}'): + break + length += 1 + # It's not clear what we should do with ':' in the flow context. + if (self.flow_level and ch == u':' + and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'): + self.forward(length) + raise ScannerError("while scanning a plain scalar", start_mark, + "found unexpected ':'", self.get_mark(), + "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.") + if length == 0: + break + self.allow_simple_key = False + chunks.extend(spaces) + chunks.append(self.prefix(length)) + self.forward(length) + end_mark = self.get_mark() + spaces = self.scan_plain_spaces(indent, start_mark) + if not spaces or self.peek() == u'#' \ + or (not self.flow_level and self.column < indent): + break + return ScalarToken(u''.join(chunks), True, start_mark, end_mark) + + def scan_plain_spaces(self, indent, start_mark): + # See the specification for details. + # The specification is really confusing about tabs in plain scalars. + # We just forbid them completely. Do not use tabs in YAML! 
+ chunks = [] + length = 0 + while self.peek(length) in u' ': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + self.allow_simple_key = True + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + breaks = [] + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() == ' ': + self.forward() + else: + breaks.append(self.scan_line_break()) + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + elif whitespaces: + chunks.append(whitespaces) + return chunks + + def scan_tag_handle(self, name, start_mark): + # See the specification for details. + # For some strange reasons, the specification does not allow '_' in + # tag handles. I have allowed it anyway. + ch = self.peek() + if ch != u'!': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 1 + ch = self.peek(length) + if ch != u' ': + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if ch != u'!': + self.forward(length) + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length += 1 + value = self.prefix(length) + self.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # See the specification for details. + # Note: we do not check if URI is well-formed. 
+ chunks = [] + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.!~*\'()[]%': + if ch == u'%': + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = self.peek(length) + if length: + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + if not chunks: + raise ScannerError("while parsing a %s" % name, start_mark, + "expected URI, but found %r" % ch.encode('utf-8'), + self.get_mark()) + return u''.join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # See the specification for details. + bytes = [] + mark = self.get_mark() + while self.peek() == u'%': + self.forward() + for k in range(2): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected URI escape sequence of 2 hexdecimal numbers, but found %r" % + (self.peek(k).encode('utf-8')), self.get_mark()) + bytes.append(chr(int(self.prefix(2), 16))) + self.forward(2) + try: + value = unicode(''.join(bytes), 'utf-8') + except UnicodeDecodeError, exc: + raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) + return value + + def scan_line_break(self): + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.peek() + if ch in u'\r\n\x85': + if self.prefix(2) == u'\r\n': + self.forward(2) + else: + self.forward() + return u'\n' + elif ch in u'\u2028\u2029': + self.forward() + return ch + return u'' + +#try: +# import psyco +# psyco.bind(Scanner) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/7.0.1/yaml/serializer.py b/scripts/clang-tidy/7.0.1/yaml/serializer.py new file mode 100644 index 000000000..0bf1e96dc --- /dev/null +++ b/scripts/clang-tidy/7.0.1/yaml/serializer.py @@ -0,0 +1,111 @@ + 
+__all__ = ['Serializer', 'SerializerError'] + +from error import YAMLError +from events import * +from nodes import * + +class SerializerError(YAMLError): + pass + +class Serializer(object): + + ANCHOR_TEMPLATE = u'id%03d' + + def __init__(self, encoding=None, + explicit_start=None, explicit_end=None, version=None, tags=None): + self.use_encoding = encoding + self.use_explicit_start = explicit_start + self.use_explicit_end = explicit_end + self.use_version = version + self.use_tags = tags + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + self.closed = None + + def open(self): + if self.closed is None: + self.emit(StreamStartEvent(encoding=self.use_encoding)) + self.closed = False + elif self.closed: + raise SerializerError("serializer is closed") + else: + raise SerializerError("serializer is already opened") + + def close(self): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif not self.closed: + self.emit(StreamEndEvent()) + self.closed = True + + #def __del__(self): + # self.close() + + def serialize(self, node): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif self.closed: + raise SerializerError("serializer is closed") + self.emit(DocumentStartEvent(explicit=self.use_explicit_start, + version=self.use_version, tags=self.use_tags)) + self.anchor_node(node) + self.serialize_node(node, None, None) + self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + + def anchor_node(self, node): + if node in self.anchors: + if self.anchors[node] is None: + self.anchors[node] = self.generate_anchor(node) + else: + self.anchors[node] = None + if isinstance(node, SequenceNode): + for item in node.value: + self.anchor_node(item) + elif isinstance(node, MappingNode): + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) + + def generate_anchor(self, node): + 
self.last_anchor_id += 1 + return self.ANCHOR_TEMPLATE % self.last_anchor_id + + def serialize_node(self, node, parent, index): + alias = self.anchors[node] + if node in self.serialized_nodes: + self.emit(AliasEvent(alias)) + else: + self.serialized_nodes[node] = True + self.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + detected_tag = self.resolve(ScalarNode, node.value, (True, False)) + default_tag = self.resolve(ScalarNode, node.value, (False, True)) + implicit = (node.tag == detected_tag), (node.tag == default_tag) + self.emit(ScalarEvent(alias, node.tag, implicit, node.value, + style=node.style)) + elif isinstance(node, SequenceNode): + implicit = (node.tag + == self.resolve(SequenceNode, node.value, True)) + self.emit(SequenceStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emit(SequenceEndEvent()) + elif isinstance(node, MappingNode): + implicit = (node.tag + == self.resolve(MappingNode, node.value, True)) + self.emit(MappingStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + self.emit(MappingEndEvent()) + self.ascend_resolver() + diff --git a/scripts/clang-tidy/7.0.1/yaml/tokens.py b/scripts/clang-tidy/7.0.1/yaml/tokens.py new file mode 100644 index 000000000..4d0b48a39 --- /dev/null +++ b/scripts/clang-tidy/7.0.1/yaml/tokens.py @@ -0,0 +1,104 @@ + +class Token(object): + def __init__(self, start_mark, end_mark): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in self.__dict__ + if not key.endswith('_mark')] + attributes.sort() + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +#class BOMToken(Token): +# id = '' + +class 
DirectiveToken(Token): + id = '' + def __init__(self, name, value, start_mark, end_mark): + self.name = name + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class DocumentStartToken(Token): + id = '' + +class DocumentEndToken(Token): + id = '' + +class StreamStartToken(Token): + id = '' + def __init__(self, start_mark=None, end_mark=None, + encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndToken(Token): + id = '' + +class BlockSequenceStartToken(Token): + id = '' + +class BlockMappingStartToken(Token): + id = '' + +class BlockEndToken(Token): + id = '' + +class FlowSequenceStartToken(Token): + id = '[' + +class FlowMappingStartToken(Token): + id = '{' + +class FlowSequenceEndToken(Token): + id = ']' + +class FlowMappingEndToken(Token): + id = '}' + +class KeyToken(Token): + id = '?' + +class ValueToken(Token): + id = ':' + +class BlockEntryToken(Token): + id = '-' + +class FlowEntryToken(Token): + id = ',' + +class AliasToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class AnchorToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class TagToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class ScalarToken(Token): + id = '' + def __init__(self, value, plain, start_mark, end_mark, style=None): + self.value = value + self.plain = plain + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + diff --git a/scripts/clang-tidy/8.0.0/.travis.yml b/scripts/clang-tidy/8.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang-tidy/8.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: 
+ include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang-tidy/8.0.0/README-yaml.md b/scripts/clang-tidy/8.0.0/README-yaml.md new file mode 100644 index 000000000..2cc738ab7 --- /dev/null +++ b/scripts/clang-tidy/8.0.0/README-yaml.md @@ -0,0 +1,13 @@ +This is a copy of `pyyaml-3.12` vendored on april 24, 2018 by @springmeyer. + +https://github.com/mapbox/mason/issues/563 documents why. + +The process to vendor was: + +``` +cd mason +pip install pyyaml --user +cp $(python -m site --user-site)/yaml scripts/clang-tidy/6.0.0/ +``` + +Then the `clang-tidy` package was built and the `yaml` directory was copied beside the `share/run-clang-tidy.py` script (which depends on it). \ No newline at end of file diff --git a/scripts/clang-tidy/8.0.0/script.sh b/scripts/clang-tidy/8.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang-tidy/8.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang-tidy/8.0.0/yaml/__init__.py b/scripts/clang-tidy/8.0.0/yaml/__init__.py new file mode 100644 index 000000000..87c15d38a --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/__init__.py @@ -0,0 +1,315 @@ + +from error import * + +from tokens import * +from events import * +from nodes import * + +from loader import * +from dumper import * + +__version__ = '3.12' + +try: + from cyaml import * + __with_libyaml__ = True +except ImportError: + __with_libyaml__ = False + +def scan(stream, Loader=Loader): + """ + Scan a YAML stream and produce scanning tokens. + """ + loader = Loader(stream) + try: + while loader.check_token(): + yield loader.get_token() + finally: + loader.dispose() + +def parse(stream, Loader=Loader): + """ + Parse a YAML stream and produce parsing events. + """ + loader = Loader(stream) + try: + while loader.check_event(): + yield loader.get_event() + finally: + loader.dispose() + +def compose(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + +def compose_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + loader = Loader(stream) + try: + while loader.check_node(): + yield loader.get_node() + finally: + loader.dispose() + +def load(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + loader = Loader(stream) + try: + return loader.get_single_data() + finally: + loader.dispose() + +def load_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. 
+ """ + loader = Loader(stream) + try: + while loader.check_data(): + yield loader.get_data() + finally: + loader.dispose() + +def safe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + return load(stream, SafeLoader) + +def safe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + return load_all(stream, SafeLoader) + +def emit(events, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + from StringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + try: + for event in events: + dumper.emit(event) + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize_all(nodes, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for node in nodes: + dumper.serialize(node) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + +def dump_all(documents, stream=None, Dumper=Dumper, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for data in documents: + dumper.represent(data) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def dump(data, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a Python object into a YAML stream. 
+ If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=Dumper, **kwds) + +def safe_dump_all(documents, stream=None, **kwds): + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + +def safe_dump(data, stream=None, **kwds): + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + +def add_implicit_resolver(tag, regexp, first=None, + Loader=Loader, Dumper=Dumper): + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + Loader.add_implicit_resolver(tag, regexp, first) + Dumper.add_implicit_resolver(tag, regexp, first) + +def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): + """ + Add a path based resolver for the given tag. + A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. + """ + Loader.add_path_resolver(tag, path, kind) + Dumper.add_path_resolver(tag, path, kind) + +def add_constructor(tag, constructor, Loader=Loader): + """ + Add a constructor for the given tag. + Constructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. + """ + Loader.add_constructor(tag, constructor) + +def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. 
+ Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. + """ + Loader.add_multi_constructor(tag_prefix, multi_constructor) + +def add_representer(data_type, representer, Dumper=Dumper): + """ + Add a representer for the given type. + Representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. + """ + Dumper.add_representer(data_type, representer) + +def add_multi_representer(data_type, multi_representer, Dumper=Dumper): + """ + Add a representer for the given type. + Multi-representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. + """ + Dumper.add_multi_representer(data_type, multi_representer) + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. + """ + def __init__(cls, name, bases, kwds): + super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + cls.yaml_dumper.add_representer(cls, cls.to_yaml) + +class YAMLObject(object): + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. + """ + + __metaclass__ = YAMLObjectMetaclass + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_loader = Loader + yaml_dumper = Dumper + + yaml_tag = None + yaml_flow_style = None + + def from_yaml(cls, loader, node): + """ + Convert a representation node to a Python object. + """ + return loader.construct_yaml_object(node, cls) + from_yaml = classmethod(from_yaml) + + def to_yaml(cls, dumper, data): + """ + Convert a Python object to a representation node. 
+ """ + return dumper.represent_yaml_object(cls.yaml_tag, data, cls, + flow_style=cls.yaml_flow_style) + to_yaml = classmethod(to_yaml) + diff --git a/scripts/clang-tidy/8.0.0/yaml/composer.py b/scripts/clang-tidy/8.0.0/yaml/composer.py new file mode 100644 index 000000000..06e5ac782 --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/composer.py @@ -0,0 +1,139 @@ + +__all__ = ['Composer', 'ComposerError'] + +from error import MarkedYAMLError +from events import * +from nodes import * + +class ComposerError(MarkedYAMLError): + pass + +class Composer(object): + + def __init__(self): + self.anchors = {} + + def check_node(self): + # Drop the STREAM-START event. + if self.check_event(StreamStartEvent): + self.get_event() + + # If there are more documents available? + return not self.check_event(StreamEndEvent) + + def get_node(self): + # Get the root node of the next document. + if not self.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # Drop the STREAM-START event. + self.get_event() + + # Compose a document if the stream is not empty. + document = None + if not self.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.check_event(StreamEndEvent): + event = self.get_event() + raise ComposerError("expected a single document in the stream", + document.start_mark, "but found another document", + event.start_mark) + + # Drop the STREAM-END event. + self.get_event() + + return document + + def compose_document(self): + # Drop the DOCUMENT-START event. + self.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. 
+ self.get_event() + + self.anchors = {} + return node + + def compose_node(self, parent, index): + if self.check_event(AliasEvent): + event = self.get_event() + anchor = event.anchor + if anchor not in self.anchors: + raise ComposerError(None, None, "found undefined alias %r" + % anchor.encode('utf-8'), event.start_mark) + return self.anchors[anchor] + event = self.peek_event() + anchor = event.anchor + if anchor is not None: + if anchor in self.anchors: + raise ComposerError("found duplicate anchor %r; first occurence" + % anchor.encode('utf-8'), self.anchors[anchor].start_mark, + "second occurence", event.start_mark) + self.descend_resolver(parent, index) + if self.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + event = self.get_event() + tag = event.tag + if tag is None or tag == u'!': + tag = self.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode(tag, event.value, + event.start_mark, event.end_mark, style=event.style) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + + def compose_mapping_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': 
+ tag = self.resolve(MappingNode, None, start_event.implicit) + node = MappingNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + while not self.check_event(MappingEndEvent): + #key_event = self.peek_event() + item_key = self.compose_node(node, None) + #if item_key in node.value: + # raise ComposerError("while composing a mapping", start_event.start_mark, + # "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + #node.value[item_key] = item_value + node.value.append((item_key, item_value)) + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + diff --git a/scripts/clang-tidy/8.0.0/yaml/constructor.py b/scripts/clang-tidy/8.0.0/yaml/constructor.py new file mode 100644 index 000000000..635faac3e --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/constructor.py @@ -0,0 +1,675 @@ + +__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', + 'ConstructorError'] + +from error import * +from nodes import * + +import datetime + +import binascii, re, sys, types + +class ConstructorError(MarkedYAMLError): + pass + +class BaseConstructor(object): + + yaml_constructors = {} + yaml_multi_constructors = {} + + def __init__(self): + self.constructed_objects = {} + self.recursive_objects = {} + self.state_generators = [] + self.deep_construct = False + + def check_data(self): + # If there are more documents available? + return self.check_node() + + def get_data(self): + # Construct and return the next document. + if self.check_node(): + return self.construct_document(self.get_node()) + + def get_single_data(self): + # Ensure that the stream contains a single document and construct it. 
+ node = self.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + data = self.construct_object(node) + while self.state_generators: + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + raise ConstructorError(None, None, + "found unconstructable recursive node", node.start_mark) + self.recursive_objects[node] = None + constructor = None + tag_suffix = None + if node.tag in self.yaml_constructors: + constructor = self.yaml_constructors[node.tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if node.tag.startswith(tag_prefix): + tag_suffix = node.tag[len(tag_prefix):] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = node.tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = generator.next() + if self.deep_construct: + for dummy in generator: + pass + else: + self.state_generators.append(generator) + self.constructed_objects[node] = data + del 
self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_scalar(self, node): + if not isinstance(node, ScalarNode): + raise ConstructorError(None, None, + "expected a scalar node, but found %s" % node.id, + node.start_mark) + return node.value + + def construct_sequence(self, node, deep=False): + if not isinstance(node, SequenceNode): + raise ConstructorError(None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + return [self.construct_object(child, deep=deep) + for child in node.value] + + def construct_mapping(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except TypeError, exc: + raise ConstructorError("while constructing a mapping", node.start_mark, + "found unacceptable key (%s)" % exc, key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + def construct_pairs(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + def add_constructor(cls, tag, constructor): + if not 'yaml_constructors' in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + add_constructor = classmethod(add_constructor) + + def add_multi_constructor(cls, tag_prefix, multi_constructor): + if not 'yaml_multi_constructors' in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + 
cls.yaml_multi_constructors[tag_prefix] = multi_constructor + add_multi_constructor = classmethod(add_multi_constructor) + +class SafeConstructor(BaseConstructor): + + def construct_scalar(self, node): + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + + def construct_yaml_null(self, node): + self.construct_scalar(node) + return None + + bool_values = { + u'yes': True, + u'no': False, + u'true': True, + u'false': False, + u'on': True, + u'off': False, + } + + def construct_yaml_bool(self, node): + value = 
self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '') + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '0': + return 0 + elif value.startswith('0b'): + return sign*int(value[2:], 2) + elif value.startswith('0x'): + return sign*int(value[2:], 16) + elif value[0] == '0': + return sign*int(value, 8) + elif ':' in value: + digits = [int(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*int(value) + + inf_value = 1e300 + while inf_value != inf_value*inf_value: + inf_value *= inf_value + nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). + + def construct_yaml_float(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '').lower() + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '.inf': + return sign*self.inf_value + elif value == '.nan': + return self.nan_value + elif ':' in value: + digits = [float(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*float(value) + + def construct_yaml_binary(self, node): + value = self.construct_scalar(node) + try: + return str(value).decode('base64') + except (binascii.Error, UnicodeEncodeError), exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + timestamp_regexp = re.compile( + ur'''^(?P[0-9][0-9][0-9][0-9]) + -(?P[0-9][0-9]?) + -(?P[0-9][0-9]?) + (?:(?:[Tt]|[ \t]+) + (?P[0-9][0-9]?) + :(?P[0-9][0-9]) + :(?P[0-9][0-9]) + (?:\.(?P[0-9]*))? + (?:[ \t]*(?PZ|(?P[-+])(?P[0-9][0-9]?) 
+ (?::(?P[0-9][0-9]))?))?)?$''', re.X) + + def construct_yaml_timestamp(self, node): + value = self.construct_scalar(node) + match = self.timestamp_regexp.match(node.value) + values = match.groupdict() + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + if not values['hour']: + return datetime.date(year, month, day) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction = values['fraction'][:6] + while len(fraction) < 6: + fraction += '0' + fraction = int(fraction) + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + tz_minute = int(values['tz_minute'] or 0) + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data + + def construct_yaml_omap(self, node): + # Note: we do not check for duplicate keys, because it's too + # CPU-expensive. + omap = [] + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + omap.append((key, value)) + + def construct_yaml_pairs(self, node): + # Note: the same code as `construct_yaml_omap`. 
+ pairs = [] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + data = set() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + value = self.construct_scalar(node) + try: + return value.encode('ascii') + except UnicodeEncodeError: + return value + + def construct_yaml_seq(self, node): + data = [] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + data = {} + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + raise ConstructorError(None, None, + "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'), + node.start_mark) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:null', + SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:bool', + SafeConstructor.construct_yaml_bool) + 
+SafeConstructor.add_constructor( + u'tag:yaml.org,2002:int', + SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:float', + SafeConstructor.construct_yaml_float) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:binary', + SafeConstructor.construct_yaml_binary) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:omap', + SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', + SafeConstructor.construct_yaml_pairs) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:set', + SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:str', + SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:seq', + SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:map', + SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, + SafeConstructor.construct_undefined) + +class Constructor(SafeConstructor): + + def construct_python_str(self, node): + return self.construct_scalar(node).encode('utf-8') + + def construct_python_unicode(self, node): + return self.construct_scalar(node) + + def construct_python_long(self, node): + return long(self.construct_yaml_int(node)) + + def construct_python_complex(self, node): + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + return tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python module", mark, + "expected non-empty name appended to the tag", mark) + try: + __import__(name) + except ImportError, exc: + raise ConstructorError("while constructing a Python module", mark, + "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark) + return 
sys.modules[name] + + def find_python_name(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python object", mark, + "expected non-empty name appended to the tag", mark) + if u'.' in name: + module_name, object_name = name.rsplit('.', 1) + else: + module_name = '__builtin__' + object_name = name + try: + __import__(module_name) + except ImportError, exc: + raise ConstructorError("while constructing a Python object", mark, + "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark) + module = sys.modules[module_name] + if not hasattr(module, object_name): + raise ConstructorError("while constructing a Python object", mark, + "cannot find %r in the module %r" % (object_name.encode('utf-8'), + module.__name__), mark) + return getattr(module, object_name) + + def construct_python_name(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python name", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python module", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_module(suffix, node.start_mark) + + class classobj: pass + + def make_python_instance(self, suffix, node, + args=None, kwds=None, newobj=False): + if not args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if newobj and isinstance(cls, type(self.classobj)) \ + and not args and not kwds: + instance = self.classobj() + instance.__class__ = cls + return instance + elif newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, 
instance, state): + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(object, key, value) + + def construct_python_object(self, suffix, node): + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. 
+ if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} + state = {} + listitems = [] + dictitems = {} + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if state: + self.set_python_instance_state(instance, state) + if listitems: + instance.extend(listitems) + if dictitems: + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + return self.construct_python_object_apply(suffix, node, newobj=True) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/none', + Constructor.construct_yaml_null) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/bool', + Constructor.construct_yaml_bool) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/str', + Constructor.construct_python_str) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/unicode', + Constructor.construct_python_unicode) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/int', + Constructor.construct_yaml_int) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/long', + Constructor.construct_python_long) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/float', + Constructor.construct_yaml_float) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/complex', + Constructor.construct_python_complex) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/list', + Constructor.construct_yaml_seq) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/tuple', + Constructor.construct_python_tuple) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/dict', + Constructor.construct_yaml_map) + +Constructor.add_multi_constructor( + 
u'tag:yaml.org,2002:python/name:', + Constructor.construct_python_name) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/module:', + Constructor.construct_python_module) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object:', + Constructor.construct_python_object) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/apply:', + Constructor.construct_python_object_apply) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/new:', + Constructor.construct_python_object_new) + diff --git a/scripts/clang-tidy/8.0.0/yaml/cyaml.py b/scripts/clang-tidy/8.0.0/yaml/cyaml.py new file mode 100644 index 000000000..68dcd7519 --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/cyaml.py @@ -0,0 +1,85 @@ + +__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', + 'CBaseDumper', 'CSafeDumper', 'CDumper'] + +from _yaml import CParser, CEmitter + +from constructor import * + +from serializer import * +from representer import * + +from resolver import * + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class CSafeLoader(CParser, SafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class CLoader(CParser, Constructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + Constructor.__init__(self) + Resolver.__init__(self) + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + 
allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CDumper(CEmitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/8.0.0/yaml/dumper.py b/scripts/clang-tidy/8.0.0/yaml/dumper.py new file mode 100644 index 000000000..f811d2c91 --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/dumper.py @@ -0,0 +1,62 @@ + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] + +from emitter import * +from serializer import * +from representer import * +from resolver 
import * + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class Dumper(Emitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + 
version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/8.0.0/yaml/emitter.py b/scripts/clang-tidy/8.0.0/yaml/emitter.py new file mode 100644 index 000000000..e5bcdcccb --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/emitter.py @@ -0,0 +1,1140 @@ + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +__all__ = ['Emitter', 'EmitterError'] + +from error import YAMLError +from events import * + +class EmitterError(YAMLError): + pass + +class ScalarAnalysis(object): + def __init__(self, scalar, empty, multiline, + allow_flow_plain, allow_block_plain, + allow_single_quoted, allow_double_quoted, + allow_block): + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + +class Emitter(object): + + DEFAULT_TAG_PREFIXES = { + u'!' : u'!', + u'tag:yaml.org,2002:' : u'!!', + } + + def __init__(self, stream, canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + + # The stream should have the methods `write` and possibly `flush`. + self.stream = stream + + # Encoding can be overriden by STREAM-START. + self.encoding = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] + self.state = self.expect_stream_start + + # Current event and the event queue. + self.events = [] + self.event = None + + # The current indentation level and the stack of previous indents. 
+ self.indents = [] + self.indent = None + + # Flow level. + self.flow_level = 0 + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + + # Whether the document requires an explicit document indicator + self.open_ended = False + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + self.best_indent = 2 + if indent and 1 < indent < 10: + self.best_indent = indent + self.best_width = 80 + if width and width > self.best_indent*2: + self.best_width = width + self.best_line_break = u'\n' + if line_break in [u'\r', u'\n', u'\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None + + # Prepared anchor and tag. + self.prepared_anchor = None + self.prepared_tag = None + + # Scalar analysis and style. + self.analysis = None + self.style = None + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. 
+ + def need_more_events(self): + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return (len(self.events) < count+1) + + def increase_indent(self, flow=False, indentless=False): + self.indents.append(self.indent) + if self.indent is None: + if flow: + self.indent = self.best_indent + else: + self.indent = 0 + elif not indentless: + self.indent += self.best_indent + + # States. + + # Stream handlers. + + def expect_stream_start(self): + if isinstance(self.event, StreamStartEvent): + if self.event.encoding and not getattr(self.stream, 'encoding', None): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError("expected StreamStartEvent, but got %s" + % self.event) + + def expect_nothing(self): + raise EmitterError("expected nothing, but got %s" % self.event) + + # Document handlers. 
+ + def expect_first_document_start(self): + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = self.event.tags.keys() + handles.sort() + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = (first and not self.event.explicit and not self.canonical + and not self.event.version and not self.event.tags + and not self.check_empty_document()) + if not implicit: + self.write_indent() + self.write_indicator(u'---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError("expected DocumentStartEvent, but got %s" + % self.event) + + def expect_document_end(self): + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator(u'...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError("expected DocumentEndEvent, but got %s" + % self.event) + + def expect_document_root(self): + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. 
+ + def expect_node(self, root=False, sequence=False, mapping=False, + simple_key=False): + self.root_context = root + self.sequence_context = sequence + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + self.process_anchor(u'&') + self.process_tag() + if isinstance(self.event, ScalarEvent): + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_sequence(): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_mapping(): + self.expect_flow_mapping() + else: + self.expect_block_mapping() + else: + raise EmitterError("expected NodeEvent, but got %s" % self.event) + + def expect_alias(self): + if self.event.anchor is None: + raise EmitterError("anchor is not specified for alias") + self.process_anchor(u'*') + self.state = self.states.pop() + + def expect_scalar(self): + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
+ + def expect_flow_sequence(self): + self.write_indicator(u'[', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. 
+ + def expect_flow_mapping(self): + self.write_indicator(u'{', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(u':', True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. 
+ + def expect_block_sequence(self): + indentless = (self.mapping_context and not self.indention) + self.increase_indent(flow=False, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + if not first and isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + self.write_indicator(u'-', True, indention=True) + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. + + def expect_block_mapping(self): + self.increase_indent(flow=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + if not first and isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + if self.check_simple_key(): + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + self.write_indent() + self.write_indicator(u':', True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. 
+ + def check_empty_sequence(self): + return (isinstance(self.event, SequenceStartEvent) and self.events + and isinstance(self.events[0], SequenceEndEvent)) + + def check_empty_mapping(self): + return (isinstance(self.event, MappingStartEvent) and self.events + and isinstance(self.events[0], MappingEndEvent)) + + def check_empty_document(self): + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return (isinstance(event, ScalarEvent) and event.anchor is None + and event.tag is None and event.implicit and event.value == u'') + + def check_simple_key(self): + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ + and self.event.tag is not None: + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return (length < 128 and (isinstance(self.event, AliasEvent) + or (isinstance(self.event, ScalarEvent) + and not self.analysis.empty and not self.analysis.multiline) + or self.check_empty_sequence() or self.check_empty_mapping())) + + # Anchor, Tag, and Scalar processors. 
+ + def process_anchor(self, indicator): + if self.event.anchor is None: + self.prepared_anchor = None + return + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator+self.prepared_anchor, True) + self.prepared_anchor = None + + def process_tag(self): + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ((not self.canonical or tag is None) and + ((self.style == '' and self.event.implicit[0]) + or (self.style != '' and self.event.implicit[1]))): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = u'!' + self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError("tag is not specified") + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + self.prepared_tag = None + + def choose_scalar_style(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if not self.event.style and self.event.implicit[0]: + if (not (self.simple_key_context and + (self.analysis.empty or self.analysis.multiline)) + and (self.flow_level and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain))): + return '' + if self.event.style and self.event.style in '|>': + if (not self.flow_level and not self.simple_key_context + and self.analysis.allow_block): + return self.event.style + if not self.event.style or self.event.style == '\'': + if (self.analysis.allow_single_quoted and + not (self.simple_key_context and self.analysis.multiline)): + return '\'' + return '"' + + def process_scalar(self): + if self.analysis is None: + self.analysis 
= self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = (not self.simple_key_context) + #if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == '\'': + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + + # Analyzers. + + def prepare_version(self, version): + major, minor = version + if major != 1: + raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) + return u'%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + if not handle: + raise EmitterError("tag handle must not be empty") + if handle[0] != u'!' 
or handle[-1] != u'!': + raise EmitterError("tag handle must start and end with '!': %r" + % (handle.encode('utf-8'))) + for ch in handle[1:-1]: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the tag handle: %r" + % (ch.encode('utf-8'), handle.encode('utf-8'))) + return handle + + def prepare_tag_prefix(self, prefix): + if not prefix: + raise EmitterError("tag prefix must not be empty") + chunks = [] + start = end = 0 + if prefix[0] == u'!': + end = 1 + while end < len(prefix): + ch = prefix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?!:@&=+$,_.~*\'()[]': + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(prefix[start:end]) + return u''.join(chunks) + + def prepare_tag(self, tag): + if not tag: + raise EmitterError("tag must not be empty") + if tag == u'!': + return tag + handle = None + suffix = tag + prefixes = self.tag_prefixes.keys() + prefixes.sort() + for prefix in prefixes: + if tag.startswith(prefix) \ + and (prefix == u'!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix):] + chunks = [] + start = end = 0 + while end < len(suffix): + ch = suffix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.~*\'()[]' \ + or (ch == u'!' 
and handle != u'!'): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = u''.join(chunks) + if handle: + return u'%s%s' % (handle, suffix_text) + else: + return u'!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + if not anchor: + raise EmitterError("anchor must not be empty") + for ch in anchor: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the anchor: %r" + % (ch.encode('utf-8'), anchor.encode('utf-8'))) + return anchor + + def analyze_scalar(self, scalar): + + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, + allow_flow_plain=False, allow_block_plain=True, + allow_single_quoted=True, allow_double_quoted=True, + allow_block=False) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith(u'---') or scalar.startswith(u'...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceeded_by_whitespace = True + + # Last character or followed by a whitespace. + followed_by_whitespace = (len(scalar) == 1 or + scalar[1] in u'\0 \t\r\n\x85\u2028\u2029') + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. 
+ if ch in u'#,[]{}&*!|>\'\"%@`': + flow_indicators = True + block_indicators = True + if ch in u'?:': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in u',?[]{}': + flow_indicators = True + if ch == u':': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'#' and preceeded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in u'\n\x85\u2028\u2029': + line_breaks = True + if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): + if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': + unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. + if ch == u' ': + if index == 0: + leading_space = True + if index == len(scalar)-1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in u'\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar)-1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029') + followed_by_whitespace = (index+1 >= len(scalar) or + scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029') + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. 
+ if (leading_space or leading_break + or trailing_space or trailing_break): + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if space_break or special_characters: + allow_flow_plain = allow_block_plain = \ + allow_single_quoted = allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis(scalar=scalar, + empty=False, multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block) + + # Writers. + + def flush_stream(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # Write BOM if needed. 
+ if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write(u'\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, + whitespace=False, indention=False): + if self.whitespace or not need_whitespace: + data = indicator + else: + data = u' '+indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + indent = self.indent or 0 + if not self.indention or self.column > indent \ + or (self.column == indent and not self.whitespace): + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = u' '*(indent-self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + data = u'%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + data = u'%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. 
+ + def write_single_quoted(self, text, split=True): + self.write_indicator(u'\'', True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != u' ': + if start+1 == end and self.column > self.best_width and split \ + and start != 0 and end != len(text): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'': + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == u'\'': + data = u'\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + self.write_indicator(u'\'', False) + + ESCAPE_REPLACEMENTS = { + u'\0': u'0', + u'\x07': u'a', + u'\x08': u'b', + u'\x09': u't', + u'\x0A': u'n', + u'\x0B': u'v', + u'\x0C': u'f', + u'\x0D': u'r', + u'\x1B': u'e', + u'\"': u'\"', + u'\\': u'\\', + u'\x85': u'N', + u'\xA0': u'_', + u'\u2028': u'L', + u'\u2029': u'P', + } + + def write_double_quoted(self, text, split=True): + self.write_indicator(u'"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \ + or not (u'\x20' <= ch <= u'\x7E' + or (self.allow_unicode + and (u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD'))): + if 
start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = u'\\'+self.ESCAPE_REPLACEMENTS[ch] + elif ch <= u'\xFF': + data = u'\\x%02X' % ord(ch) + elif ch <= u'\uFFFF': + data = u'\\u%04X' % ord(ch) + else: + data = u'\\U%08X' % ord(ch) + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end+1 + if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \ + and self.column+(end-start) > self.best_width and split: + data = text[start:end]+u'\\' + if start < end: + start = end + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == u' ': + data = u'\\' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator(u'"', False) + + def determine_block_hints(self, text): + hints = u'' + if text: + if text[0] in u' \n\x85\u2028\u2029': + hints += unicode(self.best_indent) + if text[-1] not in u'\n\x85\u2028\u2029': + hints += u'-' + elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029': + hints += u'+' + return hints + + def write_folded(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'>'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if not leading_space and ch is not None and ch != u' ' \ + and text[start] == u'\n': + self.write_line_break() + leading_space = (ch == u' ') + for br in text[start:end]: + if br == u'\n': + 
self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + spaces = (ch == u' ') + end += 1 + + def write_literal(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'|'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + else: + if ch is None or ch in u'\n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + + def write_plain(self, text, split=True): + if self.root_context: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = u' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = 
text[end] + if spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + diff --git a/scripts/clang-tidy/8.0.0/yaml/error.py b/scripts/clang-tidy/8.0.0/yaml/error.py new file mode 100644 index 000000000..577686db5 --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/error.py @@ -0,0 +1,75 @@ + +__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] + +class Mark(object): + + def __init__(self, name, index, line, column, buffer, pointer): + self.name = name + self.index = index + self.line = line + self.column = column + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + if self.buffer is None: + return None + head = '' + start = self.pointer + while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer-start > max_length/2-1: + head = ' ... ' + start += 5 + break + tail = '' + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': + end += 1 + if end-self.pointer > max_length/2-1: + tail = ' ... 
' + end -= 5 + break + snippet = self.buffer[start:end].encode('utf-8') + return ' '*indent + head + snippet + tail + '\n' \ + + ' '*(indent+self.pointer-start+len(head)) + '^' + + def __str__(self): + snippet = self.get_snippet() + where = " in \"%s\", line %d, column %d" \ + % (self.name, self.line+1, self.column+1) + if snippet is not None: + where += ":\n"+snippet + return where + +class YAMLError(Exception): + pass + +class MarkedYAMLError(YAMLError): + + def __init__(self, context=None, context_mark=None, + problem=None, problem_mark=None, note=None): + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + + def __str__(self): + lines = [] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None \ + and (self.problem is None or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None: + lines.append(self.note) + return '\n'.join(lines) + diff --git a/scripts/clang-tidy/8.0.0/yaml/events.py b/scripts/clang-tidy/8.0.0/yaml/events.py new file mode 100644 index 000000000..f79ad389c --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/events.py @@ -0,0 +1,86 @@ + +# Abstract classes. 
+ +class Event(object): + def __init__(self, start_mark=None, end_mark=None): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] + if hasattr(self, key)] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +class NodeEvent(Event): + def __init__(self, anchor, start_mark=None, end_mark=None): + self.anchor = anchor + self.start_mark = start_mark + self.end_mark = end_mark + +class CollectionStartEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, + flow_style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class CollectionEndEvent(Event): + pass + +# Implementations. + +class StreamStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndEvent(Event): + pass + +class DocumentStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None, version=None, tags=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + self.version = version + self.tags = tags + +class DocumentEndEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + +class AliasEvent(NodeEvent): + pass + +class ScalarEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, value, + start_mark=None, end_mark=None, style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class SequenceStartEvent(CollectionStartEvent): + pass 
+ +class SequenceEndEvent(CollectionEndEvent): + pass + +class MappingStartEvent(CollectionStartEvent): + pass + +class MappingEndEvent(CollectionEndEvent): + pass + diff --git a/scripts/clang-tidy/8.0.0/yaml/loader.py b/scripts/clang-tidy/8.0.0/yaml/loader.py new file mode 100644 index 000000000..293ff467b --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/loader.py @@ -0,0 +1,40 @@ + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] + +from reader import * +from scanner import * +from parser import * +from composer import * +from constructor import * +from resolver import * + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/8.0.0/yaml/nodes.py b/scripts/clang-tidy/8.0.0/yaml/nodes.py new file mode 100644 index 000000000..c4f070c41 --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/nodes.py @@ -0,0 +1,49 @@ + +class Node(object): + def __init__(self, tag, value, start_mark, end_mark): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + value = self.value + #if isinstance(value, list): + # if len(value) == 0: + # value = '' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = 
'<%d items>' % len(value) + #else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... ') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + +class ScalarNode(Node): + id = 'scalar' + def __init__(self, tag, value, + start_mark=None, end_mark=None, style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class CollectionNode(Node): + def __init__(self, tag, value, + start_mark=None, end_mark=None, flow_style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class SequenceNode(CollectionNode): + id = 'sequence' + +class MappingNode(CollectionNode): + id = 'mapping' + diff --git a/scripts/clang-tidy/8.0.0/yaml/parser.py b/scripts/clang-tidy/8.0.0/yaml/parser.py new file mode 100644 index 000000000..f9e3057f3 --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/parser.py @@ -0,0 +1,589 @@ + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? 
+# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# +# FIRST sets: +# +# stream: { STREAM-START } +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR 
TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } + +__all__ = ['Parser', 'ParserError'] + +from error import MarkedYAMLError +from tokens import * +from events import * +from scanner import * + +class ParserError(MarkedYAMLError): + pass + +class Parser(object): + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + + DEFAULT_TAGS = { + u'!': u'!', + u'!!': u'tag:yaml.org,2002:', + } + + def __init__(self): + self.current_event = None + self.yaml_version = None + self.tag_handles = {} + self.states = [] + self.marks = [] + self.state = self.parse_stream_start + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def check_event(self, *choices): + # Check the type of the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # Get the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + return self.current_event + + def get_event(self): + # Get the next event and proceed further. + if self.current_event is None: + if self.state: + self.current_event = self.state() + value = self.current_event + self.current_event = None + return value + + # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END + # implicit_document ::= block_node DOCUMENT-END* + # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* + + def parse_stream_start(self): + + # Parse the stream start. + token = self.get_token() + event = StreamStartEvent(token.start_mark, token.end_mark, + encoding=token.encoding) + + # Prepare the next state. 
+ self.state = self.parse_implicit_document_start + + return event + + def parse_implicit_document_start(self): + + # Parse an implicit document. + if not self.check_token(DirectiveToken, DocumentStartToken, + StreamEndToken): + self.tag_handles = self.DEFAULT_TAGS + token = self.peek_token() + start_mark = end_mark = token.start_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=False) + + # Prepare the next state. + self.states.append(self.parse_document_end) + self.state = self.parse_block_node + + return event + + else: + return self.parse_document_start() + + def parse_document_start(self): + + # Parse any extra document end indicators. + while self.check_token(DocumentEndToken): + self.get_token() + + # Parse an explicit document. + if not self.check_token(StreamEndToken): + token = self.peek_token() + start_mark = token.start_mark + version, tags = self.process_directives() + if not self.check_token(DocumentStartToken): + raise ParserError(None, None, + "expected '', but found %r" + % self.peek_token().id, + self.peek_token().start_mark) + token = self.get_token() + end_mark = token.end_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=True, version=version, tags=tags) + self.states.append(self.parse_document_end) + self.state = self.parse_document_content + else: + # Parse the end of the stream. + token = self.get_token() + event = StreamEndEvent(token.start_mark, token.end_mark) + assert not self.states + assert not self.marks + self.state = None + return event + + def parse_document_end(self): + + # Parse the document end. + token = self.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.check_token(DocumentEndToken): + token = self.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, + explicit=explicit) + + # Prepare the next state. 
+ self.state = self.parse_document_start + + return event + + def parse_document_content(self): + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + event = self.process_empty_scalar(self.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == u'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == u'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle.encode('utf-8'), + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? 
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + if self.check_token(AliasToken): + token = self.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) + self.state = self.states.pop() + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle.encode('utf-8'), + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == u'!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.peek_token().start_mark + event = None + implicit = (tag is None or tag == u'!') + if indentless_sequence and 
self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + self.state = self.parse_indentless_sequence_entry + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == u'!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + self.state = self.states.pop() + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_sequence_first_entry + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_mapping_first_key + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_sequence_first_entry + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_mapping_first_key + elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. 
+ event = ScalarEvent(anchor, tag, (implicit, False), u'', + start_mark, end_mark) + self.state = self.states.pop() + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.peek_token() + raise ParserError("while parsing a %s node" % node, start_mark, + "expected the node content, but found %r" % token.id, + token.start_mark) + return event + + # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END + + def parse_block_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_sequence_entry() + + def parse_block_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, BlockEndToken): + self.states.append(self.parse_block_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_block_sequence_entry + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block collection", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ + + def parse_indentless_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, + KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_indentless_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_indentless_sequence_entry + return self.process_empty_scalar(token.end_mark) + token = self.peek_token() + event = SequenceEndEvent(token.start_mark, token.start_mark) + self.state = self.states.pop() + return event + + # block_mapping ::= BLOCK-MAPPING_START + # ((KEY 
block_node_or_indentless_sequence?)? + # (VALUE block_node_or_indentless_sequence?)?)* + # BLOCK-END + + def parse_block_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_mapping_key() + + def parse_block_mapping_key(self): + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_value) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block mapping", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_block_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_key) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_block_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + # flow_sequence ::= FLOW-SEQUENCE-START + # (flow_sequence_entry FLOW-ENTRY)* + # flow_sequence_entry? + # FLOW-SEQUENCE-END + # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + # + # Note that while production rules for both flow_sequence_entry and + # flow_mapping_entry are equal, their interpretations are different. + # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` + # generate an inline mapping (set syntax). 
+ + def parse_flow_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + if not self.check_token(FlowSequenceEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow sequence", self.marks[-1], + "expected ',' or ']', but got %r" % token.id, token.start_mark) + + if self.check_token(KeyToken): + token = self.peek_token() + event = MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_sequence_entry_mapping_end(self): + self.state = self.parse_flow_sequence_entry + token = self.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + + def parse_flow_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_mapping_key(first=True) + + def parse_flow_mapping_key(self, first=False): + if not self.check_token(FlowMappingEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow mapping", self.marks[-1], + "expected ',' or '}', but got %r" % token.id, token.start_mark) + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(token.end_mark) + elif not self.check_token(FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_empty_value) + return self.parse_flow_node() + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_key) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_mapping_empty_value(self): + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(self.peek_token().start_mark) + + def process_empty_scalar(self, mark): + return ScalarEvent(None, None, (True, False), u'', mark, mark) + diff --git a/scripts/clang-tidy/8.0.0/yaml/reader.py b/scripts/clang-tidy/8.0.0/yaml/reader.py new file mode 100644 index 000000000..3249e6b9f --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/reader.py @@ -0,0 +1,190 @@ +# This module contains abstractions for the input stream. You don't have to +# looks further, there are no pretty code. +# +# We define two classes here. +# +# Mark(source, line, column) +# It's just a record and its only use is producing nice error messages. +# Parser does not use it for any other purposes. +# +# Reader(source, data) +# Reader determines the encoding of `data` and converts it to unicode. +# Reader provides the following methods and attributes: +# reader.peek(length=1) - return the next `length` characters +# reader.forward(length=1) - move the current position to `length` characters. +# reader.index - the number of the current character. +# reader.line, stream.column - the line and the column of the current character. 
+ +__all__ = ['Reader', 'ReaderError'] + +from error import YAMLError, Mark + +import codecs, re + +class ReaderError(YAMLError): + + def __init__(self, name, position, character, encoding, reason): + self.name = name + self.character = character + self.position = position + self.encoding = encoding + self.reason = reason + + def __str__(self): + if isinstance(self.character, str): + return "'%s' codec can't decode byte #x%02x: %s\n" \ + " in \"%s\", position %d" \ + % (self.encoding, ord(self.character), self.reason, + self.name, self.position) + else: + return "unacceptable character #x%04x: %s\n" \ + " in \"%s\", position %d" \ + % (self.character, self.reason, + self.name, self.position) + +class Reader(object): + # Reader: + # - determines the data encoding and converts it to unicode, + # - checks if characters are in allowed range, + # - adds '\0' to the end. + + # Reader accepts + # - a `str` object, + # - a `unicode` object, + # - a file-like object with its `read` method returning `str`, + # - a file-like object with its `read` method returning `unicode`. + + # Yeah, it's ugly and slow. 
+ + def __init__(self, stream): + self.name = None + self.stream = None + self.stream_pointer = 0 + self.eof = True + self.buffer = u'' + self.pointer = 0 + self.raw_buffer = None + self.raw_decode = None + self.encoding = None + self.index = 0 + self.line = 0 + self.column = 0 + if isinstance(stream, unicode): + self.name = "" + self.check_printable(stream) + self.buffer = stream+u'\0' + elif isinstance(stream, str): + self.name = "" + self.raw_buffer = stream + self.determine_encoding() + else: + self.stream = stream + self.name = getattr(stream, 'name', "") + self.eof = False + self.raw_buffer = '' + self.determine_encoding() + + def peek(self, index=0): + try: + return self.buffer[self.pointer+index] + except IndexError: + self.update(index+1) + return self.buffer[self.pointer+index] + + def prefix(self, length=1): + if self.pointer+length >= len(self.buffer): + self.update(length) + return self.buffer[self.pointer:self.pointer+length] + + def forward(self, length=1): + if self.pointer+length+1 >= len(self.buffer): + self.update(length+1) + while length: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch in u'\n\x85\u2028\u2029' \ + or (ch == u'\r' and self.buffer[self.pointer] != u'\n'): + self.line += 1 + self.column = 0 + elif ch != u'\uFEFF': + self.column += 1 + length -= 1 + + def get_mark(self): + if self.stream is None: + return Mark(self.name, self.index, self.line, self.column, + self.buffer, self.pointer) + else: + return Mark(self.name, self.index, self.line, self.column, + None, None) + + def determine_encoding(self): + while not self.eof and len(self.raw_buffer) < 2: + self.update_raw() + if not isinstance(self.raw_buffer, unicode): + if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): + self.raw_decode = codecs.utf_16_le_decode + self.encoding = 'utf-16-le' + elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): + self.raw_decode = codecs.utf_16_be_decode + self.encoding = 'utf-16-be' + else: + self.raw_decode = 
codecs.utf_8_decode + self.encoding = 'utf-8' + self.update(1) + + NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') + def check_printable(self, data): + match = self.NON_PRINTABLE.search(data) + if match: + character = match.group() + position = self.index+(len(self.buffer)-self.pointer)+match.start() + raise ReaderError(self.name, position, ord(character), + 'unicode', "special characters are not allowed") + + def update(self, length): + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer:] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, + 'strict', self.eof) + except UnicodeDecodeError, exc: + character = exc.object[exc.start] + if self.stream is not None: + position = self.stream_pointer-len(self.raw_buffer)+exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, + exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += u'\0' + self.raw_buffer = None + break + + def update_raw(self, size=1024): + data = self.stream.read(size) + if data: + self.raw_buffer += data + self.stream_pointer += len(data) + else: + self.eof = True + +#try: +# import psyco +# psyco.bind(Reader) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/8.0.0/yaml/representer.py b/scripts/clang-tidy/8.0.0/yaml/representer.py new file mode 100644 index 000000000..4ea8cb1fe --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/representer.py @@ -0,0 +1,486 @@ + +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError'] + +from error import * +from nodes import * + +import datetime + +import sys, copy_reg, types + +class RepresenterError(YAMLError): + pass + +class 
BaseRepresenter(object): + + yaml_representers = {} + yaml_multi_representers = {} + + def __init__(self, default_style=None, default_flow_style=None): + self.default_style = default_style + self.default_flow_style = default_flow_style + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent(self, data): + node = self.represent_data(data) + self.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def get_classobj_bases(self, cls): + bases = [cls] + for base in cls.__bases__: + bases.extend(self.get_classobj_bases(base)) + return bases + + def represent_data(self, data): + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + #if node is None: + # raise RepresenterError("recursive objects are not allowed: %r" % data) + return node + #self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if type(data) is types.InstanceType: + data_types = self.get_classobj_bases(data.__class__)+list(data_types) + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, unicode(data)) + #if alias_key is not None: + # self.represented_objects[alias_key] = node + return node + + def add_representer(cls, data_type, representer): + if not 'yaml_representers' in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + 
cls.yaml_representers[data_type] = representer + add_representer = classmethod(add_representer) + + def add_multi_representer(cls, data_type, representer): + if not 'yaml_multi_representers' in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + add_multi_representer = classmethod(add_multi_representer) + + def represent_scalar(self, tag, value, style=None): + if style is None: + style = self.default_style + node = ScalarNode(tag, value, style=style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + value = [] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + value = [] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = mapping.items() + mapping.sort() + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + 
else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + return False + +class SafeRepresenter(BaseRepresenter): + + def ignore_aliases(self, data): + if data is None: + return True + if isinstance(data, tuple) and data == (): + return True + if isinstance(data, (str, unicode, bool, int, float)): + return True + + def represent_none(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:null', + u'null') + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_bool(self, data): + if data: + value = u'true' + else: + value = u'false' + return self.represent_scalar(u'tag:yaml.org,2002:bool', value) + + def represent_int(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + def represent_long(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value*inf_value): + inf_value *= inf_value + + def represent_float(self, data): + if data != data or (data == 0.0 and data == 1.0): + value = u'.nan' + elif data == self.inf_value: + value = u'.inf' + elif data == -self.inf_value: + value = u'-.inf' + else: + value = unicode(repr(data)).lower() + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag. We fix this by adding + # '.0' before the 'e' symbol. + if u'.' 
not in value and u'e' in value: + value = value.replace(u'e', u'.0e', 1) + return self.represent_scalar(u'tag:yaml.org,2002:float', value) + + def represent_list(self, data): + #pairs = (len(data) > 0 and isinstance(data, list)) + #if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + #if not pairs: + return self.represent_sequence(u'tag:yaml.org,2002:seq', data) + #value = [] + #for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + #return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + return self.represent_mapping(u'tag:yaml.org,2002:map', data) + + def represent_set(self, data): + value = {} + for key in data: + value[key] = None + return self.represent_mapping(u'tag:yaml.org,2002:set', value) + + def represent_date(self, data): + value = unicode(data.isoformat()) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + value = unicode(data.isoformat(' ')) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + raise RepresenterError("cannot represent an object: %s" % data) + +SafeRepresenter.add_representer(type(None), + SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, + SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(unicode, + SafeRepresenter.represent_unicode) + +SafeRepresenter.add_representer(bool, + SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, + SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(long, + SafeRepresenter.represent_long) + 
+SafeRepresenter.add_representer(float, + SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, + SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, + SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, + SafeRepresenter.represent_undefined) + +class Representer(SafeRepresenter): + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:python/str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + tag = None + try: + data.encode('ascii') + tag = u'tag:yaml.org,2002:python/unicode' + except UnicodeEncodeError: + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data) + + def represent_long(self, data): + tag = u'tag:yaml.org,2002:int' + if int(data) is not data: + tag = u'tag:yaml.org,2002:python/long' + return self.represent_scalar(tag, unicode(data)) + + def represent_complex(self, data): + if data.imag == 0.0: + data = u'%r' % data.real + elif data.real == 0.0: + data = u'%rj' % data.imag + elif data.imag > 0: + data = u'%r+%rj' % (data.real, data.imag) + else: + data = u'%r%rj' % (data.real, data.imag) + return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) + + def represent_tuple(self, data): + return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + name = u'%s.%s' % 
(data.__module__, data.__name__) + return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'') + + def represent_module(self, data): + return self.represent_scalar( + u'tag:yaml.org,2002:python/module:'+data.__name__, u'') + + def represent_instance(self, data): + # For instances of classic classes, we use __getinitargs__ and + # __getstate__ to serialize the data. + + # If data.__getinitargs__ exists, the object must be reconstructed by + # calling cls(**args), where args is a tuple returned by + # __getinitargs__. Otherwise, the cls.__init__ method should never be + # called and the class instance is created by instantiating a trivial + # class and assigning to the instance's __class__ variable. + + # If data.__getstate__ exists, it returns the state of the object. + # Otherwise, the state of the object is data.__dict__. + + # We produce either a !!python/object or !!python/object/new node. + # If data.__getinitargs__ does not exist and state is a dictionary, we + # produce a !!python/object node . Otherwise we produce a + # !!python/object/new node. + + cls = data.__class__ + class_name = u'%s.%s' % (cls.__module__, cls.__name__) + args = None + state = None + if hasattr(data, '__getinitargs__'): + args = list(data.__getinitargs__()) + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__ + if args is None and isinstance(state, dict): + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+class_name, state) + if isinstance(state, dict) and not state: + return self.represent_sequence( + u'tag:yaml.org,2002:python/object/new:'+class_name, args) + value = {} + if args: + value['args'] = args + value['state'] = state + return self.represent_mapping( + u'tag:yaml.org,2002:python/object/new:'+class_name, value) + + def represent_object(self, data): + # We use __reduce__ API to save the data. 
data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. + + cls = type(data) + if cls in copy_reg.dispatch_table: + reduce = copy_reg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError("cannot represent object: %r" % data) + reduce = (list(reduce)+[None]*5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = u'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = u'tag:yaml.org,2002:python/object/apply:' + newobj = False + function_name = u'%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems \ + and isinstance(state, dict) and newobj: + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+function_name, state) + if not listitems and not dictitems \ + and isinstance(state, dict) and not state: + return self.represent_sequence(tag+function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag+function_name, value) 
+ +Representer.add_representer(str, + Representer.represent_str) + +Representer.add_representer(unicode, + Representer.represent_unicode) + +Representer.add_representer(long, + Representer.represent_long) + +Representer.add_representer(complex, + Representer.represent_complex) + +Representer.add_representer(tuple, + Representer.represent_tuple) + +Representer.add_representer(type, + Representer.represent_name) + +Representer.add_representer(types.ClassType, + Representer.represent_name) + +Representer.add_representer(types.FunctionType, + Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, + Representer.represent_name) + +Representer.add_representer(types.ModuleType, + Representer.represent_module) + +Representer.add_multi_representer(types.InstanceType, + Representer.represent_instance) + +Representer.add_multi_representer(object, + Representer.represent_object) + diff --git a/scripts/clang-tidy/8.0.0/yaml/resolver.py b/scripts/clang-tidy/8.0.0/yaml/resolver.py new file mode 100644 index 000000000..528fbc0ea --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/resolver.py @@ -0,0 +1,227 @@ + +__all__ = ['BaseResolver', 'Resolver'] + +from error import * +from nodes import * + +import re + +class ResolverError(YAMLError): + pass + +class BaseResolver(object): + + DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} + yaml_path_resolvers = {} + + def __init__(self): + self.resolver_exact_paths = [] + self.resolver_prefix_paths = [] + + def add_implicit_resolver(cls, tag, regexp, first): + if not 'yaml_implicit_resolvers' in cls.__dict__: + implicit_resolvers = {} + for key in cls.yaml_implicit_resolvers: + implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:] + cls.yaml_implicit_resolvers = implicit_resolvers + if first is None: + first = [None] + for ch in first: + 
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + add_implicit_resolver = classmethod(add_implicit_resolver) + + def add_path_resolver(cls, tag, path, kind=None): + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. 
+ if not 'yaml_path_resolvers' in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError("Invalid path element: %s" % element) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ + and not isinstance(node_check, basestring) \ + and node_check is not None: + raise ResolverError("Invalid node checker: %s" % node_check) + if not isinstance(index_check, (basestring, int)) \ + and index_check is not None: + raise ResolverError("Invalid index checker: %s" % index_check) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] \ + and kind is not None: + raise ResolverError("Invalid node kind: %s" % kind) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + add_path_resolver = classmethod(add_path_resolver) + + def descend_resolver(self, current_node, current_index): + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, + current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + 
prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, + current_node, current_index): + node_check, index_check = path[depth-1] + if isinstance(node_check, basestring): + if current_node.tag != node_check: + return + elif node_check is not None: + if not isinstance(current_node, node_check): + return + if index_check is True and current_index is not None: + return + if (index_check is False or index_check is None) \ + and current_index is None: + return + if isinstance(index_check, basestring): + if not (isinstance(current_index, ScalarNode) + and index_check == current_index.value): + return + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return + return True + + def resolve(self, kind, value, implicit): + if kind is ScalarNode and implicit[0]: + if value == u'': + resolvers = self.yaml_implicit_resolvers.get(u'', []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if self.yaml_path_resolvers: + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + +class Resolver(BaseResolver): + pass + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:bool', + re.compile(ur'''^(?:yes|Yes|YES|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + 
list(u'yYnNtTfFoO')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:float', + re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? + |\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:int', + re.compile(ur'''^(?:[-+]?0b[0-1_]+ + |[-+]?0[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), + list(u'-+0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:merge', + re.compile(ur'^(?:<<)$'), + [u'<']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:null', + re.compile(ur'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + [u'~', u'n', u'N', u'']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:timestamp', + re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? + (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list(u'0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:value', + re.compile(ur'^(?:=)$'), + [u'=']) + +# The following resolver is only for documentation purposes. It cannot work +# because plain scalars cannot start with '!', '&', or '*'. 
+Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:yaml', + re.compile(ur'^(?:!|&|\*)$'), + list(u'!&*')) + diff --git a/scripts/clang-tidy/8.0.0/yaml/scanner.py b/scripts/clang-tidy/8.0.0/yaml/scanner.py new file mode 100644 index 000000000..834f662a4 --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/scanner.py @@ -0,0 +1,1453 @@ + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# Read comments in the Scanner code for more details. +# + +__all__ = ['Scanner', 'ScannerError'] + +from error import MarkedYAMLError +from tokens import * + +class ScannerError(MarkedYAMLError): + pass + +class SimpleKey(object): + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + +class Scanner(object): + + def __init__(self): + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer. + + # Had we reached the end of the stream? + self.done = False + + # The number of unclosed '{' and '['. `flow_level == 0` means block + # context. + self.flow_level = 0 + + # List of processed tokens that are not yet emitted. 
+ self.tokens = [] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. + self.possible_simple_keys = {} + + # Public methods. + + def check_token(self, *choices): + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # Return the next token, but do not delete if from the queue. 
+ while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + return self.tokens[0] + + def get_token(self): + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. + + def need_more_tokens(self): + if self.done: + return False + if not self.tokens: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + + def fetch_more_tokens(self): + + # Eat whitespaces and comments until we reach the next token. + self.scan_to_next_token() + + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.column) + + # Peek the next character. + ch = self.peek() + + # Is it the end of stream? + if ch == u'\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == u'%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == u'-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == u'.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + #if ch == u'\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == u'[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == u'{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == u']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? 
+ if ch == u'}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == u',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == u'-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == u'?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == u':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == u'*': + return self.fetch_alias() + + # Is it an anchor? + if ch == u'&': + return self.fetch_anchor() + + # Is it a tag? + if ch == u'!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == u'|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == u'>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == u'\'': + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == u'\"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError("while scanning for the next token", None, + "found character %r that cannot start any token" + % ch.encode('utf-8'), self.get_mark()) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. 
We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). + for level in self.possible_simple_keys.keys(): + key = self.possible_simple_keys[level] + if key.line != self.line \ + or self.index-key.index > 1024: + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # The next token may start a simple key. We check if it's possible + # and save its position. This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.column + + # The next token might be a simple key. Let's save it's number and + # position. + if self.allow_simple_key: + self.remove_possible_simple_key() + token_number = self.tokens_taken+len(self.tokens) + key = SimpleKey(token_number, required, + self.index, self.line, self.column, self.get_mark()) + self.possible_simple_keys[self.flow_level] = key + + def remove_possible_simple_key(self): + # Remove the saved possible key position at the current flow level. 
+ if self.flow_level in self.possible_simple_keys: + key = self.possible_simple_keys[self.flow_level] + + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + + del self.possible_simple_keys[self.flow_level] + + # Indentation functions. + + def unwind_indent(self, column): + + ## In flow context, tokens should respect indentation. + ## Actually the condition should be `self.indent >= column` according to + ## the spec. But this condition will prohibit intuitively correct + ## constructions such as + ## key : { + ## } + #if self.flow_level and self.indent > column: + # raise ScannerError(None, None, + # "invalid intendation or unclosed '[' or '{'", + # self.get_mark()) + + # In the flow context, indentation is ignored. We make the scanner less + # restrictive then specification requires. + if self.flow_level: + return + + # In block context, we may need to issue the BLOCK-END tokens. + while self.indent > column: + mark = self.get_mark() + self.indent = self.indents.pop() + self.tokens.append(BlockEndToken(mark, mark)) + + def add_indent(self, column): + # Check if we need to increase indentation. + if self.indent < column: + self.indents.append(self.indent) + self.indent = column + return True + return False + + # Fetchers. + + def fetch_stream_start(self): + # We always add STREAM-START as the first token and STREAM-END as the + # last token. + + # Read the token. + mark = self.get_mark() + + # Add STREAM-START. + self.tokens.append(StreamStartToken(mark, mark, + encoding=self.encoding)) + + + def fetch_stream_end(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + self.possible_simple_keys = {} + + # Read the token. + mark = self.get_mark() + + # Add STREAM-END. + self.tokens.append(StreamEndToken(mark, mark)) + + # The steam is finished. 
+ self.done = True + + def fetch_directive(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Scan and add DIRECTIVE. + self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END. + start_mark = self.get_mark() + self.forward(3) + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_start(self): + self.fetch_flow_collection_start(FlowSequenceStartToken) + + def fetch_flow_mapping_start(self): + self.fetch_flow_collection_start(FlowMappingStartToken) + + def fetch_flow_collection_start(self, TokenClass): + + # '[' and '{' may start a simple key. + self.save_possible_simple_key() + + # Increase the flow level. + self.flow_level += 1 + + # Simple keys are allowed after '[' and '{'. + self.allow_simple_key = True + + # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_end(self): + self.fetch_flow_collection_end(FlowSequenceEndToken) + + def fetch_flow_mapping_end(self): + self.fetch_flow_collection_end(FlowMappingEndToken) + + def fetch_flow_collection_end(self, TokenClass): + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Decrease the flow level. + self.flow_level -= 1 + + # No simple keys after ']' or '}'. 
+ self.allow_simple_key = False + + # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_entry(self): + + # Simple keys are allowed after ','. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add FLOW-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError(None, None, + "sequence entries are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-SEQUENCE-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + + # Simple keys are allowed after '-'. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not nessesary a simple)? + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping keys are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-MAPPING-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. 
+ self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert(key.token_number-self.tokens_taken, + KeyToken(key.mark, key.mark)) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert(key.token_number-self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark)) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be catched by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping values are not allowed here", + self.get_mark()) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. 
+ start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + + # ALIAS could be a simple key. + self.save_possible_simple_key() + + # No simple keys after ALIAS. + self.allow_simple_key = False + + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + + # ANCHOR could start a simple key. + self.save_possible_simple_key() + + # No simple keys after ANCHOR. + self.allow_simple_key = False + + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + + # TAG could start a simple key. + self.save_possible_simple_key() + + # No simple keys after TAG. + self.allow_simple_key = False + + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + + # A simple key may follow a block scalar. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + self.fetch_flow_scalar(style='\'') + + def fetch_double(self): + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + + # A flow scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after flow scalars. + self.allow_simple_key = False + + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + + # A plain scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + + # Scan and add SCALAR. 
May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.column == 0: + return True + + def check_document_start(self): + + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'---' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_document_end(self): + + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'...' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_block_entry(self): + + # BLOCK-ENTRY: '-' (' '|'\n') + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_key(self): + + # KEY(flow context): '?' + if self.flow_level: + return True + + # KEY(block context): '?' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_value(self): + + # VALUE(flow context): ':' + if self.flow_level: + return True + + # VALUE(block context): ':' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_plain(self): + + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. + # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + ch = self.peek() + return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ + or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029' + and (ch == u'-' or (not self.flow_level and ch in u'?:'))) + + # Scanners. + + def scan_to_next_token(self): + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. 
+ # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + + if self.index == 0 and self.peek() == u'\uFEFF': + self.forward() + found = False + while not found: + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + + def scan_directive(self): + # See the specification for details. + start_mark = self.get_mark() + self.forward() + name = self.scan_directive_name(start_mark) + value = None + if name == u'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.get_mark() + elif name == u'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.get_mark() + else: + end_mark = self.get_mark() + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # See the specification for details. 
+ length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return value + + def scan_yaml_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + major = self.scan_yaml_directive_number(start_mark) + if self.peek() != '.': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or '.', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + minor = self.scan_yaml_directive_number(start_mark) + if self.peek() not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or ' ', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + return (major, minor) + + def scan_yaml_directive_number(self, start_mark): + # See the specification for details. + ch = self.peek() + if not (u'0' <= ch <= u'9'): + raise ScannerError("while scanning a directive", start_mark, + "expected a digit, but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 0 + while u'0' <= self.peek(length) <= u'9': + length += 1 + value = int(self.prefix(length)) + self.forward(length) + return value + + def scan_tag_directive_value(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + handle = self.scan_tag_directive_handle(start_mark) + while self.peek() == u' ': + self.forward() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.peek() + if ch != u' ': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_tag_directive_prefix(self, start_mark): + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_directive_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpteted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. 
+ start_mark = self.get_mark() + indicator = self.peek() + if indicator == u'*': + name = 'alias' + else: + name = 'anchor' + self.forward() + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`': + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + end_mark = self.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # See the specification for details. + start_mark = self.get_mark() + ch = self.peek(1) + if ch == u'<': + handle = None + self.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if self.peek() != u'>': + raise ScannerError("while parsing a tag", start_mark, + "expected '>', but found %r" % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + elif ch in u'\0 \t\r\n\x85\u2028\u2029': + handle = None + suffix = u'!' + self.forward() + else: + length = 1 + use_handle = False + while ch not in u'\0 \r\n\x85\u2028\u2029': + if ch == u'!': + use_handle = True + break + length += 1 + ch = self.peek(length) + handle = u'!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = u'!' 
+ self.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a tag", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + value = (handle, suffix) + end_mark = self.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style): + # See the specification for details. + + if style == '>': + folded = True + else: + folded = False + + chunks = [] + start_mark = self.get_mark() + + # Scan the header. + self.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. + min_indent = self.indent+1 + if min_indent < 1: + min_indent = 1 + if increment is None: + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + indent = min_indent+increment-1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = u'' + + # Scan the inner part of the block scalar. + while self.column == indent and self.peek() != u'\0': + chunks.extend(breaks) + leading_non_space = self.peek() not in u' \t' + length = 0 + while self.peek(length) not in u'\0\r\n\x85\u2028\u2029': + length += 1 + chunks.append(self.prefix(length)) + self.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if self.column == indent and self.peek() != u'\0': + + # Unfortunately, folding rules are ambiguous. 
+ # + # This is the folding according to the specification: + + if folded and line_break == u'\n' \ + and leading_non_space and self.peek() not in u' \t': + if not breaks: + chunks.append(u' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + #if folded and line_break == u'\n': + # if not breaks: + # if self.peek() not in ' \t': + # chunks.append(u' ') + # else: + # chunks.append(line_break) + #else: + # chunks.append(line_break) + else: + break + + # Chomp the tail. + if chomping is not False: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + + # We are done. + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + def scan_block_scalar_indicators(self, start_mark): + # See the specification for details. + chomping = None + increment = None + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + elif ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected chomping or indentation indicators, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_block_scalar_indentation(self): + # See the specification for details. + chunks = [] + max_indent = 0 + end_mark = self.get_mark() + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() != u' ': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + else: + self.forward() + if self.column > max_indent: + max_indent = self.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # See the specification for details. + chunks = [] + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + while self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + return chunks, end_mark + + def scan_flow_scalar(self, style): + # See the specification for details. + # Note that we loose indentation rules for quoted scalars. Quoted + # scalars don't need to adhere indentation because " and ' clearly + # mark the beginning and the end of them. Therefore we are less + # restrictive then the specification requires. We only need to check + # that document separators are not included in scalars. 
+ if style == '"': + double = True + else: + double = False + chunks = [] + start_mark = self.get_mark() + quote = self.peek() + self.forward() + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + while self.peek() != quote: + chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + self.forward() + end_mark = self.get_mark() + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + ESCAPE_REPLACEMENTS = { + u'0': u'\0', + u'a': u'\x07', + u'b': u'\x08', + u't': u'\x09', + u'\t': u'\x09', + u'n': u'\x0A', + u'v': u'\x0B', + u'f': u'\x0C', + u'r': u'\x0D', + u'e': u'\x1B', + u' ': u'\x20', + u'\"': u'\"', + u'\\': u'\\', + u'N': u'\x85', + u'_': u'\xA0', + u'L': u'\u2028', + u'P': u'\u2029', + } + + ESCAPE_CODES = { + u'x': 2, + u'u': 4, + u'U': 8, + } + + def scan_flow_scalar_non_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + length = 0 + while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029': + length += 1 + if length: + chunks.append(self.prefix(length)) + self.forward(length) + ch = self.peek() + if not double and ch == u'\'' and self.peek(1) == u'\'': + chunks.append(u'\'') + self.forward(2) + elif (double and ch == u'\'') or (not double and ch in u'\"\\'): + chunks.append(ch) + self.forward() + elif double and ch == u'\\': + self.forward() + ch = self.peek() + if ch in self.ESCAPE_REPLACEMENTS: + chunks.append(self.ESCAPE_REPLACEMENTS[ch]) + self.forward() + elif ch in self.ESCAPE_CODES: + length = self.ESCAPE_CODES[ch] + self.forward() + for k in range(length): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "expected escape sequence of %d hexdecimal numbers, but found %r" % + (length, self.peek(k).encode('utf-8')), self.get_mark()) + code = int(self.prefix(length), 16) + 
chunks.append(unichr(code)) + self.forward(length) + elif ch in u'\r\n\x85\u2028\u2029': + self.scan_line_break() + chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) + else: + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark()) + else: + return chunks + + def scan_flow_scalar_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + length = 0 + while self.peek(length) in u' \t': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch == u'\0': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected end of stream", self.get_mark()) + elif ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + breaks = self.scan_flow_scalar_breaks(double, start_mark) + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + else: + chunks.append(whitespaces) + return chunks + + def scan_flow_scalar_breaks(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + # Instead of checking indentation, we check for document + # separators. + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected document separator", self.get_mark()) + while self.peek() in u' \t': + self.forward() + if self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + else: + return chunks + + def scan_plain(self): + # See the specification for details. + # We add an additional restriction for the flow context: + # plain scalars in the flow context cannot contain ',', ':' and '?'. + # We also keep track of the `allow_simple_key` flag here. + # Indentation rules are loosed for the flow context. 
+ chunks = [] + start_mark = self.get_mark() + end_mark = start_mark + indent = self.indent+1 + # We allow zero indentation for scalars, but then we need to check for + # document separators at the beginning of the line. + #if indent == 0: + # indent = 1 + spaces = [] + while True: + length = 0 + if self.peek() == u'#': + break + while True: + ch = self.peek(length) + if ch in u'\0 \t\r\n\x85\u2028\u2029' \ + or (not self.flow_level and ch == u':' and + self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \ + or (self.flow_level and ch in u',:?[]{}'): + break + length += 1 + # It's not clear what we should do with ':' in the flow context. + if (self.flow_level and ch == u':' + and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'): + self.forward(length) + raise ScannerError("while scanning a plain scalar", start_mark, + "found unexpected ':'", self.get_mark(), + "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.") + if length == 0: + break + self.allow_simple_key = False + chunks.extend(spaces) + chunks.append(self.prefix(length)) + self.forward(length) + end_mark = self.get_mark() + spaces = self.scan_plain_spaces(indent, start_mark) + if not spaces or self.peek() == u'#' \ + or (not self.flow_level and self.column < indent): + break + return ScalarToken(u''.join(chunks), True, start_mark, end_mark) + + def scan_plain_spaces(self, indent, start_mark): + # See the specification for details. + # The specification is really confusing about tabs in plain scalars. + # We just forbid them completely. Do not use tabs in YAML! 
+ chunks = [] + length = 0 + while self.peek(length) in u' ': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + self.allow_simple_key = True + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + breaks = [] + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() == ' ': + self.forward() + else: + breaks.append(self.scan_line_break()) + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + elif whitespaces: + chunks.append(whitespaces) + return chunks + + def scan_tag_handle(self, name, start_mark): + # See the specification for details. + # For some strange reasons, the specification does not allow '_' in + # tag handles. I have allowed it anyway. + ch = self.peek() + if ch != u'!': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 1 + ch = self.peek(length) + if ch != u' ': + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if ch != u'!': + self.forward(length) + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length += 1 + value = self.prefix(length) + self.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # See the specification for details. + # Note: we do not check if URI is well-formed. 
+ chunks = [] + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.!~*\'()[]%': + if ch == u'%': + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = self.peek(length) + if length: + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + if not chunks: + raise ScannerError("while parsing a %s" % name, start_mark, + "expected URI, but found %r" % ch.encode('utf-8'), + self.get_mark()) + return u''.join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # See the specification for details. + bytes = [] + mark = self.get_mark() + while self.peek() == u'%': + self.forward() + for k in range(2): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected URI escape sequence of 2 hexdecimal numbers, but found %r" % + (self.peek(k).encode('utf-8')), self.get_mark()) + bytes.append(chr(int(self.prefix(2), 16))) + self.forward(2) + try: + value = unicode(''.join(bytes), 'utf-8') + except UnicodeDecodeError, exc: + raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) + return value + + def scan_line_break(self): + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.peek() + if ch in u'\r\n\x85': + if self.prefix(2) == u'\r\n': + self.forward(2) + else: + self.forward() + return u'\n' + elif ch in u'\u2028\u2029': + self.forward() + return ch + return u'' + +#try: +# import psyco +# psyco.bind(Scanner) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/8.0.0/yaml/serializer.py b/scripts/clang-tidy/8.0.0/yaml/serializer.py new file mode 100644 index 000000000..0bf1e96dc --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/serializer.py @@ -0,0 +1,111 @@ + 
+__all__ = ['Serializer', 'SerializerError'] + +from error import YAMLError +from events import * +from nodes import * + +class SerializerError(YAMLError): + pass + +class Serializer(object): + + ANCHOR_TEMPLATE = u'id%03d' + + def __init__(self, encoding=None, + explicit_start=None, explicit_end=None, version=None, tags=None): + self.use_encoding = encoding + self.use_explicit_start = explicit_start + self.use_explicit_end = explicit_end + self.use_version = version + self.use_tags = tags + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + self.closed = None + + def open(self): + if self.closed is None: + self.emit(StreamStartEvent(encoding=self.use_encoding)) + self.closed = False + elif self.closed: + raise SerializerError("serializer is closed") + else: + raise SerializerError("serializer is already opened") + + def close(self): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif not self.closed: + self.emit(StreamEndEvent()) + self.closed = True + + #def __del__(self): + # self.close() + + def serialize(self, node): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif self.closed: + raise SerializerError("serializer is closed") + self.emit(DocumentStartEvent(explicit=self.use_explicit_start, + version=self.use_version, tags=self.use_tags)) + self.anchor_node(node) + self.serialize_node(node, None, None) + self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + + def anchor_node(self, node): + if node in self.anchors: + if self.anchors[node] is None: + self.anchors[node] = self.generate_anchor(node) + else: + self.anchors[node] = None + if isinstance(node, SequenceNode): + for item in node.value: + self.anchor_node(item) + elif isinstance(node, MappingNode): + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) + + def generate_anchor(self, node): + 
self.last_anchor_id += 1 + return self.ANCHOR_TEMPLATE % self.last_anchor_id + + def serialize_node(self, node, parent, index): + alias = self.anchors[node] + if node in self.serialized_nodes: + self.emit(AliasEvent(alias)) + else: + self.serialized_nodes[node] = True + self.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + detected_tag = self.resolve(ScalarNode, node.value, (True, False)) + default_tag = self.resolve(ScalarNode, node.value, (False, True)) + implicit = (node.tag == detected_tag), (node.tag == default_tag) + self.emit(ScalarEvent(alias, node.tag, implicit, node.value, + style=node.style)) + elif isinstance(node, SequenceNode): + implicit = (node.tag + == self.resolve(SequenceNode, node.value, True)) + self.emit(SequenceStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emit(SequenceEndEvent()) + elif isinstance(node, MappingNode): + implicit = (node.tag + == self.resolve(MappingNode, node.value, True)) + self.emit(MappingStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + self.emit(MappingEndEvent()) + self.ascend_resolver() + diff --git a/scripts/clang-tidy/8.0.0/yaml/tokens.py b/scripts/clang-tidy/8.0.0/yaml/tokens.py new file mode 100644 index 000000000..4d0b48a39 --- /dev/null +++ b/scripts/clang-tidy/8.0.0/yaml/tokens.py @@ -0,0 +1,104 @@ + +class Token(object): + def __init__(self, start_mark, end_mark): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in self.__dict__ + if not key.endswith('_mark')] + attributes.sort() + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +#class BOMToken(Token): +# id = '' + +class 
DirectiveToken(Token): + id = '' + def __init__(self, name, value, start_mark, end_mark): + self.name = name + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class DocumentStartToken(Token): + id = '' + +class DocumentEndToken(Token): + id = '' + +class StreamStartToken(Token): + id = '' + def __init__(self, start_mark=None, end_mark=None, + encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndToken(Token): + id = '' + +class BlockSequenceStartToken(Token): + id = '' + +class BlockMappingStartToken(Token): + id = '' + +class BlockEndToken(Token): + id = '' + +class FlowSequenceStartToken(Token): + id = '[' + +class FlowMappingStartToken(Token): + id = '{' + +class FlowSequenceEndToken(Token): + id = ']' + +class FlowMappingEndToken(Token): + id = '}' + +class KeyToken(Token): + id = '?' + +class ValueToken(Token): + id = ':' + +class BlockEntryToken(Token): + id = '-' + +class FlowEntryToken(Token): + id = ',' + +class AliasToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class AnchorToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class TagToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class ScalarToken(Token): + id = '' + def __init__(self, value, plain, start_mark, end_mark, style=None): + self.value = value + self.plain = plain + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + diff --git a/scripts/clang-tidy/9.0.0/.travis.yml b/scripts/clang-tidy/9.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang-tidy/9.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: 
+ include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang-tidy/9.0.0/README-yaml.md b/scripts/clang-tidy/9.0.0/README-yaml.md new file mode 100644 index 000000000..2cc738ab7 --- /dev/null +++ b/scripts/clang-tidy/9.0.0/README-yaml.md @@ -0,0 +1,13 @@ +This is a copy of `pyyaml-3.12` vendored on april 24, 2018 by @springmeyer. + +https://github.com/mapbox/mason/issues/563 documents why. + +The process to vendor was: + +``` +cd mason +pip install pyyaml --user +cp $(python -m site --user-site)/yaml scripts/clang-tidy/6.0.0/ +``` + +Then the `clang-tidy` package was built and the `yaml` directory was copied beside the `share/run-clang-tidy.py` script (which depends on it). \ No newline at end of file diff --git a/scripts/clang-tidy/9.0.0/script.sh b/scripts/clang-tidy/9.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang-tidy/9.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang-tidy/9.0.0/yaml/__init__.py b/scripts/clang-tidy/9.0.0/yaml/__init__.py new file mode 100644 index 000000000..87c15d38a --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/__init__.py @@ -0,0 +1,315 @@ + +from error import * + +from tokens import * +from events import * +from nodes import * + +from loader import * +from dumper import * + +__version__ = '3.12' + +try: + from cyaml import * + __with_libyaml__ = True +except ImportError: + __with_libyaml__ = False + +def scan(stream, Loader=Loader): + """ + Scan a YAML stream and produce scanning tokens. + """ + loader = Loader(stream) + try: + while loader.check_token(): + yield loader.get_token() + finally: + loader.dispose() + +def parse(stream, Loader=Loader): + """ + Parse a YAML stream and produce parsing events. + """ + loader = Loader(stream) + try: + while loader.check_event(): + yield loader.get_event() + finally: + loader.dispose() + +def compose(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + +def compose_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + loader = Loader(stream) + try: + while loader.check_node(): + yield loader.get_node() + finally: + loader.dispose() + +def load(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + loader = Loader(stream) + try: + return loader.get_single_data() + finally: + loader.dispose() + +def load_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. 
+ """ + loader = Loader(stream) + try: + while loader.check_data(): + yield loader.get_data() + finally: + loader.dispose() + +def safe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + return load(stream, SafeLoader) + +def safe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + return load_all(stream, SafeLoader) + +def emit(events, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + from StringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + try: + for event in events: + dumper.emit(event) + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize_all(nodes, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for node in nodes: + dumper.serialize(node) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + +def dump_all(documents, stream=None, Dumper=Dumper, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for data in documents: + dumper.represent(data) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def dump(data, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a Python object into a YAML stream. 
+ If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=Dumper, **kwds) + +def safe_dump_all(documents, stream=None, **kwds): + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + +def safe_dump(data, stream=None, **kwds): + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + +def add_implicit_resolver(tag, regexp, first=None, + Loader=Loader, Dumper=Dumper): + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + Loader.add_implicit_resolver(tag, regexp, first) + Dumper.add_implicit_resolver(tag, regexp, first) + +def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): + """ + Add a path based resolver for the given tag. + A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. + """ + Loader.add_path_resolver(tag, path, kind) + Dumper.add_path_resolver(tag, path, kind) + +def add_constructor(tag, constructor, Loader=Loader): + """ + Add a constructor for the given tag. + Constructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. + """ + Loader.add_constructor(tag, constructor) + +def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. 
+ Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. + """ + Loader.add_multi_constructor(tag_prefix, multi_constructor) + +def add_representer(data_type, representer, Dumper=Dumper): + """ + Add a representer for the given type. + Representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. + """ + Dumper.add_representer(data_type, representer) + +def add_multi_representer(data_type, multi_representer, Dumper=Dumper): + """ + Add a representer for the given type. + Multi-representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. + """ + Dumper.add_multi_representer(data_type, multi_representer) + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. + """ + def __init__(cls, name, bases, kwds): + super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + cls.yaml_dumper.add_representer(cls, cls.to_yaml) + +class YAMLObject(object): + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. + """ + + __metaclass__ = YAMLObjectMetaclass + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_loader = Loader + yaml_dumper = Dumper + + yaml_tag = None + yaml_flow_style = None + + def from_yaml(cls, loader, node): + """ + Convert a representation node to a Python object. + """ + return loader.construct_yaml_object(node, cls) + from_yaml = classmethod(from_yaml) + + def to_yaml(cls, dumper, data): + """ + Convert a Python object to a representation node. 
+ """ + return dumper.represent_yaml_object(cls.yaml_tag, data, cls, + flow_style=cls.yaml_flow_style) + to_yaml = classmethod(to_yaml) + diff --git a/scripts/clang-tidy/9.0.0/yaml/composer.py b/scripts/clang-tidy/9.0.0/yaml/composer.py new file mode 100644 index 000000000..06e5ac782 --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/composer.py @@ -0,0 +1,139 @@ + +__all__ = ['Composer', 'ComposerError'] + +from error import MarkedYAMLError +from events import * +from nodes import * + +class ComposerError(MarkedYAMLError): + pass + +class Composer(object): + + def __init__(self): + self.anchors = {} + + def check_node(self): + # Drop the STREAM-START event. + if self.check_event(StreamStartEvent): + self.get_event() + + # If there are more documents available? + return not self.check_event(StreamEndEvent) + + def get_node(self): + # Get the root node of the next document. + if not self.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # Drop the STREAM-START event. + self.get_event() + + # Compose a document if the stream is not empty. + document = None + if not self.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.check_event(StreamEndEvent): + event = self.get_event() + raise ComposerError("expected a single document in the stream", + document.start_mark, "but found another document", + event.start_mark) + + # Drop the STREAM-END event. + self.get_event() + + return document + + def compose_document(self): + # Drop the DOCUMENT-START event. + self.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. 
+ self.get_event() + + self.anchors = {} + return node + + def compose_node(self, parent, index): + if self.check_event(AliasEvent): + event = self.get_event() + anchor = event.anchor + if anchor not in self.anchors: + raise ComposerError(None, None, "found undefined alias %r" + % anchor.encode('utf-8'), event.start_mark) + return self.anchors[anchor] + event = self.peek_event() + anchor = event.anchor + if anchor is not None: + if anchor in self.anchors: + raise ComposerError("found duplicate anchor %r; first occurence" + % anchor.encode('utf-8'), self.anchors[anchor].start_mark, + "second occurence", event.start_mark) + self.descend_resolver(parent, index) + if self.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + event = self.get_event() + tag = event.tag + if tag is None or tag == u'!': + tag = self.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode(tag, event.value, + event.start_mark, event.end_mark, style=event.style) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + + def compose_mapping_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': 
+ tag = self.resolve(MappingNode, None, start_event.implicit) + node = MappingNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + while not self.check_event(MappingEndEvent): + #key_event = self.peek_event() + item_key = self.compose_node(node, None) + #if item_key in node.value: + # raise ComposerError("while composing a mapping", start_event.start_mark, + # "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + #node.value[item_key] = item_value + node.value.append((item_key, item_value)) + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + diff --git a/scripts/clang-tidy/9.0.0/yaml/constructor.py b/scripts/clang-tidy/9.0.0/yaml/constructor.py new file mode 100644 index 000000000..635faac3e --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/constructor.py @@ -0,0 +1,675 @@ + +__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', + 'ConstructorError'] + +from error import * +from nodes import * + +import datetime + +import binascii, re, sys, types + +class ConstructorError(MarkedYAMLError): + pass + +class BaseConstructor(object): + + yaml_constructors = {} + yaml_multi_constructors = {} + + def __init__(self): + self.constructed_objects = {} + self.recursive_objects = {} + self.state_generators = [] + self.deep_construct = False + + def check_data(self): + # If there are more documents available? + return self.check_node() + + def get_data(self): + # Construct and return the next document. + if self.check_node(): + return self.construct_document(self.get_node()) + + def get_single_data(self): + # Ensure that the stream contains a single document and construct it. 
+ node = self.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + data = self.construct_object(node) + while self.state_generators: + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + raise ConstructorError(None, None, + "found unconstructable recursive node", node.start_mark) + self.recursive_objects[node] = None + constructor = None + tag_suffix = None + if node.tag in self.yaml_constructors: + constructor = self.yaml_constructors[node.tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if node.tag.startswith(tag_prefix): + tag_suffix = node.tag[len(tag_prefix):] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = node.tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = generator.next() + if self.deep_construct: + for dummy in generator: + pass + else: + self.state_generators.append(generator) + self.constructed_objects[node] = data + del 
self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_scalar(self, node): + if not isinstance(node, ScalarNode): + raise ConstructorError(None, None, + "expected a scalar node, but found %s" % node.id, + node.start_mark) + return node.value + + def construct_sequence(self, node, deep=False): + if not isinstance(node, SequenceNode): + raise ConstructorError(None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + return [self.construct_object(child, deep=deep) + for child in node.value] + + def construct_mapping(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except TypeError, exc: + raise ConstructorError("while constructing a mapping", node.start_mark, + "found unacceptable key (%s)" % exc, key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + def construct_pairs(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + def add_constructor(cls, tag, constructor): + if not 'yaml_constructors' in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + add_constructor = classmethod(add_constructor) + + def add_multi_constructor(cls, tag_prefix, multi_constructor): + if not 'yaml_multi_constructors' in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + 
cls.yaml_multi_constructors[tag_prefix] = multi_constructor + add_multi_constructor = classmethod(add_multi_constructor) + +class SafeConstructor(BaseConstructor): + + def construct_scalar(self, node): + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + + def construct_yaml_null(self, node): + self.construct_scalar(node) + return None + + bool_values = { + u'yes': True, + u'no': False, + u'true': True, + u'false': False, + u'on': True, + u'off': False, + } + + def construct_yaml_bool(self, node): + value = 
self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '') + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '0': + return 0 + elif value.startswith('0b'): + return sign*int(value[2:], 2) + elif value.startswith('0x'): + return sign*int(value[2:], 16) + elif value[0] == '0': + return sign*int(value, 8) + elif ':' in value: + digits = [int(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*int(value) + + inf_value = 1e300 + while inf_value != inf_value*inf_value: + inf_value *= inf_value + nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). + + def construct_yaml_float(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '').lower() + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '.inf': + return sign*self.inf_value + elif value == '.nan': + return self.nan_value + elif ':' in value: + digits = [float(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*float(value) + + def construct_yaml_binary(self, node): + value = self.construct_scalar(node) + try: + return str(value).decode('base64') + except (binascii.Error, UnicodeEncodeError), exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + timestamp_regexp = re.compile( + ur'''^(?P[0-9][0-9][0-9][0-9]) + -(?P[0-9][0-9]?) + -(?P[0-9][0-9]?) + (?:(?:[Tt]|[ \t]+) + (?P[0-9][0-9]?) + :(?P[0-9][0-9]) + :(?P[0-9][0-9]) + (?:\.(?P[0-9]*))? + (?:[ \t]*(?PZ|(?P[-+])(?P[0-9][0-9]?) 
+ (?::(?P[0-9][0-9]))?))?)?$''', re.X) + + def construct_yaml_timestamp(self, node): + value = self.construct_scalar(node) + match = self.timestamp_regexp.match(node.value) + values = match.groupdict() + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + if not values['hour']: + return datetime.date(year, month, day) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction = values['fraction'][:6] + while len(fraction) < 6: + fraction += '0' + fraction = int(fraction) + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + tz_minute = int(values['tz_minute'] or 0) + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data + + def construct_yaml_omap(self, node): + # Note: we do not check for duplicate keys, because it's too + # CPU-expensive. + omap = [] + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + omap.append((key, value)) + + def construct_yaml_pairs(self, node): + # Note: the same code as `construct_yaml_omap`. 
+ pairs = [] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + data = set() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + value = self.construct_scalar(node) + try: + return value.encode('ascii') + except UnicodeEncodeError: + return value + + def construct_yaml_seq(self, node): + data = [] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + data = {} + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + raise ConstructorError(None, None, + "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'), + node.start_mark) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:null', + SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:bool', + SafeConstructor.construct_yaml_bool) + 
+SafeConstructor.add_constructor( + u'tag:yaml.org,2002:int', + SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:float', + SafeConstructor.construct_yaml_float) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:binary', + SafeConstructor.construct_yaml_binary) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:omap', + SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', + SafeConstructor.construct_yaml_pairs) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:set', + SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:str', + SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:seq', + SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:map', + SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, + SafeConstructor.construct_undefined) + +class Constructor(SafeConstructor): + + def construct_python_str(self, node): + return self.construct_scalar(node).encode('utf-8') + + def construct_python_unicode(self, node): + return self.construct_scalar(node) + + def construct_python_long(self, node): + return long(self.construct_yaml_int(node)) + + def construct_python_complex(self, node): + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + return tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python module", mark, + "expected non-empty name appended to the tag", mark) + try: + __import__(name) + except ImportError, exc: + raise ConstructorError("while constructing a Python module", mark, + "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark) + return 
sys.modules[name] + + def find_python_name(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python object", mark, + "expected non-empty name appended to the tag", mark) + if u'.' in name: + module_name, object_name = name.rsplit('.', 1) + else: + module_name = '__builtin__' + object_name = name + try: + __import__(module_name) + except ImportError, exc: + raise ConstructorError("while constructing a Python object", mark, + "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark) + module = sys.modules[module_name] + if not hasattr(module, object_name): + raise ConstructorError("while constructing a Python object", mark, + "cannot find %r in the module %r" % (object_name.encode('utf-8'), + module.__name__), mark) + return getattr(module, object_name) + + def construct_python_name(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python name", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python module", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_module(suffix, node.start_mark) + + class classobj: pass + + def make_python_instance(self, suffix, node, + args=None, kwds=None, newobj=False): + if not args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if newobj and isinstance(cls, type(self.classobj)) \ + and not args and not kwds: + instance = self.classobj() + instance.__class__ = cls + return instance + elif newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, 
instance, state): + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(object, key, value) + + def construct_python_object(self, suffix, node): + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. 
+ if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} + state = {} + listitems = [] + dictitems = {} + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if state: + self.set_python_instance_state(instance, state) + if listitems: + instance.extend(listitems) + if dictitems: + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + return self.construct_python_object_apply(suffix, node, newobj=True) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/none', + Constructor.construct_yaml_null) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/bool', + Constructor.construct_yaml_bool) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/str', + Constructor.construct_python_str) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/unicode', + Constructor.construct_python_unicode) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/int', + Constructor.construct_yaml_int) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/long', + Constructor.construct_python_long) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/float', + Constructor.construct_yaml_float) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/complex', + Constructor.construct_python_complex) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/list', + Constructor.construct_yaml_seq) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/tuple', + Constructor.construct_python_tuple) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/dict', + Constructor.construct_yaml_map) + +Constructor.add_multi_constructor( + 
u'tag:yaml.org,2002:python/name:', + Constructor.construct_python_name) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/module:', + Constructor.construct_python_module) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object:', + Constructor.construct_python_object) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/apply:', + Constructor.construct_python_object_apply) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/new:', + Constructor.construct_python_object_new) + diff --git a/scripts/clang-tidy/9.0.0/yaml/cyaml.py b/scripts/clang-tidy/9.0.0/yaml/cyaml.py new file mode 100644 index 000000000..68dcd7519 --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/cyaml.py @@ -0,0 +1,85 @@ + +__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', + 'CBaseDumper', 'CSafeDumper', 'CDumper'] + +from _yaml import CParser, CEmitter + +from constructor import * + +from serializer import * +from representer import * + +from resolver import * + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class CSafeLoader(CParser, SafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class CLoader(CParser, Constructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + Constructor.__init__(self) + Resolver.__init__(self) + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + 
allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CDumper(CEmitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/9.0.0/yaml/dumper.py b/scripts/clang-tidy/9.0.0/yaml/dumper.py new file mode 100644 index 000000000..f811d2c91 --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/dumper.py @@ -0,0 +1,62 @@ + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] + +from emitter import * +from serializer import * +from representer import * +from resolver 
import * + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class Dumper(Emitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + 
version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/9.0.0/yaml/emitter.py b/scripts/clang-tidy/9.0.0/yaml/emitter.py new file mode 100644 index 000000000..e5bcdcccb --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/emitter.py @@ -0,0 +1,1140 @@ + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +__all__ = ['Emitter', 'EmitterError'] + +from error import YAMLError +from events import * + +class EmitterError(YAMLError): + pass + +class ScalarAnalysis(object): + def __init__(self, scalar, empty, multiline, + allow_flow_plain, allow_block_plain, + allow_single_quoted, allow_double_quoted, + allow_block): + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + +class Emitter(object): + + DEFAULT_TAG_PREFIXES = { + u'!' : u'!', + u'tag:yaml.org,2002:' : u'!!', + } + + def __init__(self, stream, canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + + # The stream should have the methods `write` and possibly `flush`. + self.stream = stream + + # Encoding can be overriden by STREAM-START. + self.encoding = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] + self.state = self.expect_stream_start + + # Current event and the event queue. + self.events = [] + self.event = None + + # The current indentation level and the stack of previous indents. 
+ self.indents = [] + self.indent = None + + # Flow level. + self.flow_level = 0 + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + + # Whether the document requires an explicit document indicator + self.open_ended = False + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + self.best_indent = 2 + if indent and 1 < indent < 10: + self.best_indent = indent + self.best_width = 80 + if width and width > self.best_indent*2: + self.best_width = width + self.best_line_break = u'\n' + if line_break in [u'\r', u'\n', u'\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None + + # Prepared anchor and tag. + self.prepared_anchor = None + self.prepared_tag = None + + # Scalar analysis and style. + self.analysis = None + self.style = None + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. 
+ + def need_more_events(self): + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return (len(self.events) < count+1) + + def increase_indent(self, flow=False, indentless=False): + self.indents.append(self.indent) + if self.indent is None: + if flow: + self.indent = self.best_indent + else: + self.indent = 0 + elif not indentless: + self.indent += self.best_indent + + # States. + + # Stream handlers. + + def expect_stream_start(self): + if isinstance(self.event, StreamStartEvent): + if self.event.encoding and not getattr(self.stream, 'encoding', None): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError("expected StreamStartEvent, but got %s" + % self.event) + + def expect_nothing(self): + raise EmitterError("expected nothing, but got %s" % self.event) + + # Document handlers. 
+ + def expect_first_document_start(self): + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = self.event.tags.keys() + handles.sort() + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = (first and not self.event.explicit and not self.canonical + and not self.event.version and not self.event.tags + and not self.check_empty_document()) + if not implicit: + self.write_indent() + self.write_indicator(u'---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError("expected DocumentStartEvent, but got %s" + % self.event) + + def expect_document_end(self): + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator(u'...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError("expected DocumentEndEvent, but got %s" + % self.event) + + def expect_document_root(self): + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. 
+ + def expect_node(self, root=False, sequence=False, mapping=False, + simple_key=False): + self.root_context = root + self.sequence_context = sequence + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + self.process_anchor(u'&') + self.process_tag() + if isinstance(self.event, ScalarEvent): + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_sequence(): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_mapping(): + self.expect_flow_mapping() + else: + self.expect_block_mapping() + else: + raise EmitterError("expected NodeEvent, but got %s" % self.event) + + def expect_alias(self): + if self.event.anchor is None: + raise EmitterError("anchor is not specified for alias") + self.process_anchor(u'*') + self.state = self.states.pop() + + def expect_scalar(self): + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
+ + def expect_flow_sequence(self): + self.write_indicator(u'[', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. 
+ + def expect_flow_mapping(self): + self.write_indicator(u'{', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(u':', True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. 
+ + def expect_block_sequence(self): + indentless = (self.mapping_context and not self.indention) + self.increase_indent(flow=False, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + if not first and isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + self.write_indicator(u'-', True, indention=True) + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. + + def expect_block_mapping(self): + self.increase_indent(flow=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + if not first and isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + if self.check_simple_key(): + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + self.write_indent() + self.write_indicator(u':', True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. 
+ + def check_empty_sequence(self): + return (isinstance(self.event, SequenceStartEvent) and self.events + and isinstance(self.events[0], SequenceEndEvent)) + + def check_empty_mapping(self): + return (isinstance(self.event, MappingStartEvent) and self.events + and isinstance(self.events[0], MappingEndEvent)) + + def check_empty_document(self): + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return (isinstance(event, ScalarEvent) and event.anchor is None + and event.tag is None and event.implicit and event.value == u'') + + def check_simple_key(self): + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ + and self.event.tag is not None: + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return (length < 128 and (isinstance(self.event, AliasEvent) + or (isinstance(self.event, ScalarEvent) + and not self.analysis.empty and not self.analysis.multiline) + or self.check_empty_sequence() or self.check_empty_mapping())) + + # Anchor, Tag, and Scalar processors. 
+ + def process_anchor(self, indicator): + if self.event.anchor is None: + self.prepared_anchor = None + return + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator+self.prepared_anchor, True) + self.prepared_anchor = None + + def process_tag(self): + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ((not self.canonical or tag is None) and + ((self.style == '' and self.event.implicit[0]) + or (self.style != '' and self.event.implicit[1]))): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = u'!' + self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError("tag is not specified") + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + self.prepared_tag = None + + def choose_scalar_style(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if not self.event.style and self.event.implicit[0]: + if (not (self.simple_key_context and + (self.analysis.empty or self.analysis.multiline)) + and (self.flow_level and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain))): + return '' + if self.event.style and self.event.style in '|>': + if (not self.flow_level and not self.simple_key_context + and self.analysis.allow_block): + return self.event.style + if not self.event.style or self.event.style == '\'': + if (self.analysis.allow_single_quoted and + not (self.simple_key_context and self.analysis.multiline)): + return '\'' + return '"' + + def process_scalar(self): + if self.analysis is None: + self.analysis 
= self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = (not self.simple_key_context) + #if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == '\'': + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + + # Analyzers. + + def prepare_version(self, version): + major, minor = version + if major != 1: + raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) + return u'%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + if not handle: + raise EmitterError("tag handle must not be empty") + if handle[0] != u'!' 
or handle[-1] != u'!': + raise EmitterError("tag handle must start and end with '!': %r" + % (handle.encode('utf-8'))) + for ch in handle[1:-1]: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the tag handle: %r" + % (ch.encode('utf-8'), handle.encode('utf-8'))) + return handle + + def prepare_tag_prefix(self, prefix): + if not prefix: + raise EmitterError("tag prefix must not be empty") + chunks = [] + start = end = 0 + if prefix[0] == u'!': + end = 1 + while end < len(prefix): + ch = prefix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?!:@&=+$,_.~*\'()[]': + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(prefix[start:end]) + return u''.join(chunks) + + def prepare_tag(self, tag): + if not tag: + raise EmitterError("tag must not be empty") + if tag == u'!': + return tag + handle = None + suffix = tag + prefixes = self.tag_prefixes.keys() + prefixes.sort() + for prefix in prefixes: + if tag.startswith(prefix) \ + and (prefix == u'!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix):] + chunks = [] + start = end = 0 + while end < len(suffix): + ch = suffix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.~*\'()[]' \ + or (ch == u'!' 
and handle != u'!'): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = u''.join(chunks) + if handle: + return u'%s%s' % (handle, suffix_text) + else: + return u'!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + if not anchor: + raise EmitterError("anchor must not be empty") + for ch in anchor: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the anchor: %r" + % (ch.encode('utf-8'), anchor.encode('utf-8'))) + return anchor + + def analyze_scalar(self, scalar): + + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, + allow_flow_plain=False, allow_block_plain=True, + allow_single_quoted=True, allow_double_quoted=True, + allow_block=False) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith(u'---') or scalar.startswith(u'...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceeded_by_whitespace = True + + # Last character or followed by a whitespace. + followed_by_whitespace = (len(scalar) == 1 or + scalar[1] in u'\0 \t\r\n\x85\u2028\u2029') + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. 
+ if ch in u'#,[]{}&*!|>\'\"%@`': + flow_indicators = True + block_indicators = True + if ch in u'?:': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in u',?[]{}': + flow_indicators = True + if ch == u':': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'#' and preceeded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in u'\n\x85\u2028\u2029': + line_breaks = True + if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): + if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': + unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. + if ch == u' ': + if index == 0: + leading_space = True + if index == len(scalar)-1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in u'\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar)-1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029') + followed_by_whitespace = (index+1 >= len(scalar) or + scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029') + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. 
+ if (leading_space or leading_break + or trailing_space or trailing_break): + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if space_break or special_characters: + allow_flow_plain = allow_block_plain = \ + allow_single_quoted = allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis(scalar=scalar, + empty=False, multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block) + + # Writers. + + def flush_stream(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # Write BOM if needed. 
+ if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write(u'\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, + whitespace=False, indention=False): + if self.whitespace or not need_whitespace: + data = indicator + else: + data = u' '+indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + indent = self.indent or 0 + if not self.indention or self.column > indent \ + or (self.column == indent and not self.whitespace): + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = u' '*(indent-self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + data = u'%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + data = u'%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. 
+ + def write_single_quoted(self, text, split=True): + self.write_indicator(u'\'', True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != u' ': + if start+1 == end and self.column > self.best_width and split \ + and start != 0 and end != len(text): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'': + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == u'\'': + data = u'\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + self.write_indicator(u'\'', False) + + ESCAPE_REPLACEMENTS = { + u'\0': u'0', + u'\x07': u'a', + u'\x08': u'b', + u'\x09': u't', + u'\x0A': u'n', + u'\x0B': u'v', + u'\x0C': u'f', + u'\x0D': u'r', + u'\x1B': u'e', + u'\"': u'\"', + u'\\': u'\\', + u'\x85': u'N', + u'\xA0': u'_', + u'\u2028': u'L', + u'\u2029': u'P', + } + + def write_double_quoted(self, text, split=True): + self.write_indicator(u'"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \ + or not (u'\x20' <= ch <= u'\x7E' + or (self.allow_unicode + and (u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD'))): + if 
start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = u'\\'+self.ESCAPE_REPLACEMENTS[ch] + elif ch <= u'\xFF': + data = u'\\x%02X' % ord(ch) + elif ch <= u'\uFFFF': + data = u'\\u%04X' % ord(ch) + else: + data = u'\\U%08X' % ord(ch) + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end+1 + if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \ + and self.column+(end-start) > self.best_width and split: + data = text[start:end]+u'\\' + if start < end: + start = end + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == u' ': + data = u'\\' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator(u'"', False) + + def determine_block_hints(self, text): + hints = u'' + if text: + if text[0] in u' \n\x85\u2028\u2029': + hints += unicode(self.best_indent) + if text[-1] not in u'\n\x85\u2028\u2029': + hints += u'-' + elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029': + hints += u'+' + return hints + + def write_folded(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'>'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if not leading_space and ch is not None and ch != u' ' \ + and text[start] == u'\n': + self.write_line_break() + leading_space = (ch == u' ') + for br in text[start:end]: + if br == u'\n': + 
self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + spaces = (ch == u' ') + end += 1 + + def write_literal(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'|'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + else: + if ch is None or ch in u'\n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + + def write_plain(self, text, split=True): + if self.root_context: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = u' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = 
text[end] + if spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + diff --git a/scripts/clang-tidy/9.0.0/yaml/error.py b/scripts/clang-tidy/9.0.0/yaml/error.py new file mode 100644 index 000000000..577686db5 --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/error.py @@ -0,0 +1,75 @@ + +__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] + +class Mark(object): + + def __init__(self, name, index, line, column, buffer, pointer): + self.name = name + self.index = index + self.line = line + self.column = column + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + if self.buffer is None: + return None + head = '' + start = self.pointer + while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer-start > max_length/2-1: + head = ' ... ' + start += 5 + break + tail = '' + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': + end += 1 + if end-self.pointer > max_length/2-1: + tail = ' ... 
' + end -= 5 + break + snippet = self.buffer[start:end].encode('utf-8') + return ' '*indent + head + snippet + tail + '\n' \ + + ' '*(indent+self.pointer-start+len(head)) + '^' + + def __str__(self): + snippet = self.get_snippet() + where = " in \"%s\", line %d, column %d" \ + % (self.name, self.line+1, self.column+1) + if snippet is not None: + where += ":\n"+snippet + return where + +class YAMLError(Exception): + pass + +class MarkedYAMLError(YAMLError): + + def __init__(self, context=None, context_mark=None, + problem=None, problem_mark=None, note=None): + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + + def __str__(self): + lines = [] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None \ + and (self.problem is None or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None: + lines.append(self.note) + return '\n'.join(lines) + diff --git a/scripts/clang-tidy/9.0.0/yaml/events.py b/scripts/clang-tidy/9.0.0/yaml/events.py new file mode 100644 index 000000000..f79ad389c --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/events.py @@ -0,0 +1,86 @@ + +# Abstract classes. 
+ +class Event(object): + def __init__(self, start_mark=None, end_mark=None): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] + if hasattr(self, key)] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +class NodeEvent(Event): + def __init__(self, anchor, start_mark=None, end_mark=None): + self.anchor = anchor + self.start_mark = start_mark + self.end_mark = end_mark + +class CollectionStartEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, + flow_style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class CollectionEndEvent(Event): + pass + +# Implementations. + +class StreamStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndEvent(Event): + pass + +class DocumentStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None, version=None, tags=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + self.version = version + self.tags = tags + +class DocumentEndEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + +class AliasEvent(NodeEvent): + pass + +class ScalarEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, value, + start_mark=None, end_mark=None, style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class SequenceStartEvent(CollectionStartEvent): + pass 
+ +class SequenceEndEvent(CollectionEndEvent): + pass + +class MappingStartEvent(CollectionStartEvent): + pass + +class MappingEndEvent(CollectionEndEvent): + pass + diff --git a/scripts/clang-tidy/9.0.0/yaml/loader.py b/scripts/clang-tidy/9.0.0/yaml/loader.py new file mode 100644 index 000000000..293ff467b --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/loader.py @@ -0,0 +1,40 @@ + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] + +from reader import * +from scanner import * +from parser import * +from composer import * +from constructor import * +from resolver import * + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/9.0.0/yaml/nodes.py b/scripts/clang-tidy/9.0.0/yaml/nodes.py new file mode 100644 index 000000000..c4f070c41 --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/nodes.py @@ -0,0 +1,49 @@ + +class Node(object): + def __init__(self, tag, value, start_mark, end_mark): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + value = self.value + #if isinstance(value, list): + # if len(value) == 0: + # value = '' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = 
'<%d items>' % len(value) + #else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... ') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + +class ScalarNode(Node): + id = 'scalar' + def __init__(self, tag, value, + start_mark=None, end_mark=None, style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class CollectionNode(Node): + def __init__(self, tag, value, + start_mark=None, end_mark=None, flow_style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class SequenceNode(CollectionNode): + id = 'sequence' + +class MappingNode(CollectionNode): + id = 'mapping' + diff --git a/scripts/clang-tidy/9.0.0/yaml/parser.py b/scripts/clang-tidy/9.0.0/yaml/parser.py new file mode 100644 index 000000000..f9e3057f3 --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/parser.py @@ -0,0 +1,589 @@ + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? 
+# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# +# FIRST sets: +# +# stream: { STREAM-START } +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR 
TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } + +__all__ = ['Parser', 'ParserError'] + +from error import MarkedYAMLError +from tokens import * +from events import * +from scanner import * + +class ParserError(MarkedYAMLError): + pass + +class Parser(object): + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + + DEFAULT_TAGS = { + u'!': u'!', + u'!!': u'tag:yaml.org,2002:', + } + + def __init__(self): + self.current_event = None + self.yaml_version = None + self.tag_handles = {} + self.states = [] + self.marks = [] + self.state = self.parse_stream_start + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def check_event(self, *choices): + # Check the type of the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # Get the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + return self.current_event + + def get_event(self): + # Get the next event and proceed further. + if self.current_event is None: + if self.state: + self.current_event = self.state() + value = self.current_event + self.current_event = None + return value + + # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END + # implicit_document ::= block_node DOCUMENT-END* + # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* + + def parse_stream_start(self): + + # Parse the stream start. + token = self.get_token() + event = StreamStartEvent(token.start_mark, token.end_mark, + encoding=token.encoding) + + # Prepare the next state. 
+ self.state = self.parse_implicit_document_start + + return event + + def parse_implicit_document_start(self): + + # Parse an implicit document. + if not self.check_token(DirectiveToken, DocumentStartToken, + StreamEndToken): + self.tag_handles = self.DEFAULT_TAGS + token = self.peek_token() + start_mark = end_mark = token.start_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=False) + + # Prepare the next state. + self.states.append(self.parse_document_end) + self.state = self.parse_block_node + + return event + + else: + return self.parse_document_start() + + def parse_document_start(self): + + # Parse any extra document end indicators. + while self.check_token(DocumentEndToken): + self.get_token() + + # Parse an explicit document. + if not self.check_token(StreamEndToken): + token = self.peek_token() + start_mark = token.start_mark + version, tags = self.process_directives() + if not self.check_token(DocumentStartToken): + raise ParserError(None, None, + "expected '', but found %r" + % self.peek_token().id, + self.peek_token().start_mark) + token = self.get_token() + end_mark = token.end_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=True, version=version, tags=tags) + self.states.append(self.parse_document_end) + self.state = self.parse_document_content + else: + # Parse the end of the stream. + token = self.get_token() + event = StreamEndEvent(token.start_mark, token.end_mark) + assert not self.states + assert not self.marks + self.state = None + return event + + def parse_document_end(self): + + # Parse the document end. + token = self.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.check_token(DocumentEndToken): + token = self.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, + explicit=explicit) + + # Prepare the next state. 
+ self.state = self.parse_document_start + + return event + + def parse_document_content(self): + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + event = self.process_empty_scalar(self.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == u'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == u'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle.encode('utf-8'), + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? 
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + if self.check_token(AliasToken): + token = self.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) + self.state = self.states.pop() + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle.encode('utf-8'), + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == u'!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.peek_token().start_mark + event = None + implicit = (tag is None or tag == u'!') + if indentless_sequence and 
self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + self.state = self.parse_indentless_sequence_entry + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == u'!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + self.state = self.states.pop() + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_sequence_first_entry + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_mapping_first_key + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_sequence_first_entry + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_mapping_first_key + elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. 
+ event = ScalarEvent(anchor, tag, (implicit, False), u'', + start_mark, end_mark) + self.state = self.states.pop() + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.peek_token() + raise ParserError("while parsing a %s node" % node, start_mark, + "expected the node content, but found %r" % token.id, + token.start_mark) + return event + + # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END + + def parse_block_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_sequence_entry() + + def parse_block_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, BlockEndToken): + self.states.append(self.parse_block_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_block_sequence_entry + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block collection", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ + + def parse_indentless_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, + KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_indentless_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_indentless_sequence_entry + return self.process_empty_scalar(token.end_mark) + token = self.peek_token() + event = SequenceEndEvent(token.start_mark, token.start_mark) + self.state = self.states.pop() + return event + + # block_mapping ::= BLOCK-MAPPING_START + # ((KEY 
block_node_or_indentless_sequence?)? + # (VALUE block_node_or_indentless_sequence?)?)* + # BLOCK-END + + def parse_block_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_mapping_key() + + def parse_block_mapping_key(self): + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_value) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block mapping", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_block_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_key) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_block_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + # flow_sequence ::= FLOW-SEQUENCE-START + # (flow_sequence_entry FLOW-ENTRY)* + # flow_sequence_entry? + # FLOW-SEQUENCE-END + # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + # + # Note that while production rules for both flow_sequence_entry and + # flow_mapping_entry are equal, their interpretations are different. + # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` + # generate an inline mapping (set syntax). 
+ + def parse_flow_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + if not self.check_token(FlowSequenceEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow sequence", self.marks[-1], + "expected ',' or ']', but got %r" % token.id, token.start_mark) + + if self.check_token(KeyToken): + token = self.peek_token() + event = MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_sequence_entry_mapping_end(self): + self.state = self.parse_flow_sequence_entry + token = self.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + + def parse_flow_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_mapping_key(first=True) + + def parse_flow_mapping_key(self, first=False): + if not self.check_token(FlowMappingEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow mapping", self.marks[-1], + "expected ',' or '}', but got %r" % token.id, token.start_mark) + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(token.end_mark) + elif not self.check_token(FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_empty_value) + return self.parse_flow_node() + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_key) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_mapping_empty_value(self): + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(self.peek_token().start_mark) + + def process_empty_scalar(self, mark): + return ScalarEvent(None, None, (True, False), u'', mark, mark) + diff --git a/scripts/clang-tidy/9.0.0/yaml/reader.py b/scripts/clang-tidy/9.0.0/yaml/reader.py new file mode 100644 index 000000000..3249e6b9f --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/reader.py @@ -0,0 +1,190 @@ +# This module contains abstractions for the input stream. You don't have to +# looks further, there are no pretty code. +# +# We define two classes here. +# +# Mark(source, line, column) +# It's just a record and its only use is producing nice error messages. +# Parser does not use it for any other purposes. +# +# Reader(source, data) +# Reader determines the encoding of `data` and converts it to unicode. +# Reader provides the following methods and attributes: +# reader.peek(length=1) - return the next `length` characters +# reader.forward(length=1) - move the current position to `length` characters. +# reader.index - the number of the current character. +# reader.line, stream.column - the line and the column of the current character. 
+ +__all__ = ['Reader', 'ReaderError'] + +from error import YAMLError, Mark + +import codecs, re + +class ReaderError(YAMLError): + + def __init__(self, name, position, character, encoding, reason): + self.name = name + self.character = character + self.position = position + self.encoding = encoding + self.reason = reason + + def __str__(self): + if isinstance(self.character, str): + return "'%s' codec can't decode byte #x%02x: %s\n" \ + " in \"%s\", position %d" \ + % (self.encoding, ord(self.character), self.reason, + self.name, self.position) + else: + return "unacceptable character #x%04x: %s\n" \ + " in \"%s\", position %d" \ + % (self.character, self.reason, + self.name, self.position) + +class Reader(object): + # Reader: + # - determines the data encoding and converts it to unicode, + # - checks if characters are in allowed range, + # - adds '\0' to the end. + + # Reader accepts + # - a `str` object, + # - a `unicode` object, + # - a file-like object with its `read` method returning `str`, + # - a file-like object with its `read` method returning `unicode`. + + # Yeah, it's ugly and slow. 
+ + def __init__(self, stream): + self.name = None + self.stream = None + self.stream_pointer = 0 + self.eof = True + self.buffer = u'' + self.pointer = 0 + self.raw_buffer = None + self.raw_decode = None + self.encoding = None + self.index = 0 + self.line = 0 + self.column = 0 + if isinstance(stream, unicode): + self.name = "" + self.check_printable(stream) + self.buffer = stream+u'\0' + elif isinstance(stream, str): + self.name = "" + self.raw_buffer = stream + self.determine_encoding() + else: + self.stream = stream + self.name = getattr(stream, 'name', "") + self.eof = False + self.raw_buffer = '' + self.determine_encoding() + + def peek(self, index=0): + try: + return self.buffer[self.pointer+index] + except IndexError: + self.update(index+1) + return self.buffer[self.pointer+index] + + def prefix(self, length=1): + if self.pointer+length >= len(self.buffer): + self.update(length) + return self.buffer[self.pointer:self.pointer+length] + + def forward(self, length=1): + if self.pointer+length+1 >= len(self.buffer): + self.update(length+1) + while length: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch in u'\n\x85\u2028\u2029' \ + or (ch == u'\r' and self.buffer[self.pointer] != u'\n'): + self.line += 1 + self.column = 0 + elif ch != u'\uFEFF': + self.column += 1 + length -= 1 + + def get_mark(self): + if self.stream is None: + return Mark(self.name, self.index, self.line, self.column, + self.buffer, self.pointer) + else: + return Mark(self.name, self.index, self.line, self.column, + None, None) + + def determine_encoding(self): + while not self.eof and len(self.raw_buffer) < 2: + self.update_raw() + if not isinstance(self.raw_buffer, unicode): + if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): + self.raw_decode = codecs.utf_16_le_decode + self.encoding = 'utf-16-le' + elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): + self.raw_decode = codecs.utf_16_be_decode + self.encoding = 'utf-16-be' + else: + self.raw_decode = 
codecs.utf_8_decode + self.encoding = 'utf-8' + self.update(1) + + NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') + def check_printable(self, data): + match = self.NON_PRINTABLE.search(data) + if match: + character = match.group() + position = self.index+(len(self.buffer)-self.pointer)+match.start() + raise ReaderError(self.name, position, ord(character), + 'unicode', "special characters are not allowed") + + def update(self, length): + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer:] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, + 'strict', self.eof) + except UnicodeDecodeError, exc: + character = exc.object[exc.start] + if self.stream is not None: + position = self.stream_pointer-len(self.raw_buffer)+exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, + exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += u'\0' + self.raw_buffer = None + break + + def update_raw(self, size=1024): + data = self.stream.read(size) + if data: + self.raw_buffer += data + self.stream_pointer += len(data) + else: + self.eof = True + +#try: +# import psyco +# psyco.bind(Reader) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/9.0.0/yaml/representer.py b/scripts/clang-tidy/9.0.0/yaml/representer.py new file mode 100644 index 000000000..4ea8cb1fe --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/representer.py @@ -0,0 +1,486 @@ + +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError'] + +from error import * +from nodes import * + +import datetime + +import sys, copy_reg, types + +class RepresenterError(YAMLError): + pass + +class 
BaseRepresenter(object): + + yaml_representers = {} + yaml_multi_representers = {} + + def __init__(self, default_style=None, default_flow_style=None): + self.default_style = default_style + self.default_flow_style = default_flow_style + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent(self, data): + node = self.represent_data(data) + self.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def get_classobj_bases(self, cls): + bases = [cls] + for base in cls.__bases__: + bases.extend(self.get_classobj_bases(base)) + return bases + + def represent_data(self, data): + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + #if node is None: + # raise RepresenterError("recursive objects are not allowed: %r" % data) + return node + #self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if type(data) is types.InstanceType: + data_types = self.get_classobj_bases(data.__class__)+list(data_types) + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, unicode(data)) + #if alias_key is not None: + # self.represented_objects[alias_key] = node + return node + + def add_representer(cls, data_type, representer): + if not 'yaml_representers' in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + 
cls.yaml_representers[data_type] = representer + add_representer = classmethod(add_representer) + + def add_multi_representer(cls, data_type, representer): + if not 'yaml_multi_representers' in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + add_multi_representer = classmethod(add_multi_representer) + + def represent_scalar(self, tag, value, style=None): + if style is None: + style = self.default_style + node = ScalarNode(tag, value, style=style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + value = [] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + value = [] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = mapping.items() + mapping.sort() + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + 
else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + return False + +class SafeRepresenter(BaseRepresenter): + + def ignore_aliases(self, data): + if data is None: + return True + if isinstance(data, tuple) and data == (): + return True + if isinstance(data, (str, unicode, bool, int, float)): + return True + + def represent_none(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:null', + u'null') + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_bool(self, data): + if data: + value = u'true' + else: + value = u'false' + return self.represent_scalar(u'tag:yaml.org,2002:bool', value) + + def represent_int(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + def represent_long(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value*inf_value): + inf_value *= inf_value + + def represent_float(self, data): + if data != data or (data == 0.0 and data == 1.0): + value = u'.nan' + elif data == self.inf_value: + value = u'.inf' + elif data == -self.inf_value: + value = u'-.inf' + else: + value = unicode(repr(data)).lower() + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag. We fix this by adding + # '.0' before the 'e' symbol. + if u'.' 
not in value and u'e' in value: + value = value.replace(u'e', u'.0e', 1) + return self.represent_scalar(u'tag:yaml.org,2002:float', value) + + def represent_list(self, data): + #pairs = (len(data) > 0 and isinstance(data, list)) + #if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + #if not pairs: + return self.represent_sequence(u'tag:yaml.org,2002:seq', data) + #value = [] + #for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + #return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + return self.represent_mapping(u'tag:yaml.org,2002:map', data) + + def represent_set(self, data): + value = {} + for key in data: + value[key] = None + return self.represent_mapping(u'tag:yaml.org,2002:set', value) + + def represent_date(self, data): + value = unicode(data.isoformat()) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + value = unicode(data.isoformat(' ')) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + raise RepresenterError("cannot represent an object: %s" % data) + +SafeRepresenter.add_representer(type(None), + SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, + SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(unicode, + SafeRepresenter.represent_unicode) + +SafeRepresenter.add_representer(bool, + SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, + SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(long, + SafeRepresenter.represent_long) + 
+SafeRepresenter.add_representer(float, + SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, + SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, + SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, + SafeRepresenter.represent_undefined) + +class Representer(SafeRepresenter): + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:python/str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + tag = None + try: + data.encode('ascii') + tag = u'tag:yaml.org,2002:python/unicode' + except UnicodeEncodeError: + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data) + + def represent_long(self, data): + tag = u'tag:yaml.org,2002:int' + if int(data) is not data: + tag = u'tag:yaml.org,2002:python/long' + return self.represent_scalar(tag, unicode(data)) + + def represent_complex(self, data): + if data.imag == 0.0: + data = u'%r' % data.real + elif data.real == 0.0: + data = u'%rj' % data.imag + elif data.imag > 0: + data = u'%r+%rj' % (data.real, data.imag) + else: + data = u'%r%rj' % (data.real, data.imag) + return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) + + def represent_tuple(self, data): + return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + name = u'%s.%s' % 
(data.__module__, data.__name__) + return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'') + + def represent_module(self, data): + return self.represent_scalar( + u'tag:yaml.org,2002:python/module:'+data.__name__, u'') + + def represent_instance(self, data): + # For instances of classic classes, we use __getinitargs__ and + # __getstate__ to serialize the data. + + # If data.__getinitargs__ exists, the object must be reconstructed by + # calling cls(**args), where args is a tuple returned by + # __getinitargs__. Otherwise, the cls.__init__ method should never be + # called and the class instance is created by instantiating a trivial + # class and assigning to the instance's __class__ variable. + + # If data.__getstate__ exists, it returns the state of the object. + # Otherwise, the state of the object is data.__dict__. + + # We produce either a !!python/object or !!python/object/new node. + # If data.__getinitargs__ does not exist and state is a dictionary, we + # produce a !!python/object node . Otherwise we produce a + # !!python/object/new node. + + cls = data.__class__ + class_name = u'%s.%s' % (cls.__module__, cls.__name__) + args = None + state = None + if hasattr(data, '__getinitargs__'): + args = list(data.__getinitargs__()) + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__ + if args is None and isinstance(state, dict): + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+class_name, state) + if isinstance(state, dict) and not state: + return self.represent_sequence( + u'tag:yaml.org,2002:python/object/new:'+class_name, args) + value = {} + if args: + value['args'] = args + value['state'] = state + return self.represent_mapping( + u'tag:yaml.org,2002:python/object/new:'+class_name, value) + + def represent_object(self, data): + # We use __reduce__ API to save the data. 
data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. + + cls = type(data) + if cls in copy_reg.dispatch_table: + reduce = copy_reg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError("cannot represent object: %r" % data) + reduce = (list(reduce)+[None]*5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = u'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = u'tag:yaml.org,2002:python/object/apply:' + newobj = False + function_name = u'%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems \ + and isinstance(state, dict) and newobj: + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+function_name, state) + if not listitems and not dictitems \ + and isinstance(state, dict) and not state: + return self.represent_sequence(tag+function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag+function_name, value) 
+ +Representer.add_representer(str, + Representer.represent_str) + +Representer.add_representer(unicode, + Representer.represent_unicode) + +Representer.add_representer(long, + Representer.represent_long) + +Representer.add_representer(complex, + Representer.represent_complex) + +Representer.add_representer(tuple, + Representer.represent_tuple) + +Representer.add_representer(type, + Representer.represent_name) + +Representer.add_representer(types.ClassType, + Representer.represent_name) + +Representer.add_representer(types.FunctionType, + Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, + Representer.represent_name) + +Representer.add_representer(types.ModuleType, + Representer.represent_module) + +Representer.add_multi_representer(types.InstanceType, + Representer.represent_instance) + +Representer.add_multi_representer(object, + Representer.represent_object) + diff --git a/scripts/clang-tidy/9.0.0/yaml/resolver.py b/scripts/clang-tidy/9.0.0/yaml/resolver.py new file mode 100644 index 000000000..528fbc0ea --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/resolver.py @@ -0,0 +1,227 @@ + +__all__ = ['BaseResolver', 'Resolver'] + +from error import * +from nodes import * + +import re + +class ResolverError(YAMLError): + pass + +class BaseResolver(object): + + DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} + yaml_path_resolvers = {} + + def __init__(self): + self.resolver_exact_paths = [] + self.resolver_prefix_paths = [] + + def add_implicit_resolver(cls, tag, regexp, first): + if not 'yaml_implicit_resolvers' in cls.__dict__: + implicit_resolvers = {} + for key in cls.yaml_implicit_resolvers: + implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:] + cls.yaml_implicit_resolvers = implicit_resolvers + if first is None: + first = [None] + for ch in first: + 
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + add_implicit_resolver = classmethod(add_implicit_resolver) + + def add_path_resolver(cls, tag, path, kind=None): + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. 
+ if not 'yaml_path_resolvers' in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError("Invalid path element: %s" % element) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ + and not isinstance(node_check, basestring) \ + and node_check is not None: + raise ResolverError("Invalid node checker: %s" % node_check) + if not isinstance(index_check, (basestring, int)) \ + and index_check is not None: + raise ResolverError("Invalid index checker: %s" % index_check) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] \ + and kind is not None: + raise ResolverError("Invalid node kind: %s" % kind) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + add_path_resolver = classmethod(add_path_resolver) + + def descend_resolver(self, current_node, current_index): + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, + current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + 
prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, + current_node, current_index): + node_check, index_check = path[depth-1] + if isinstance(node_check, basestring): + if current_node.tag != node_check: + return + elif node_check is not None: + if not isinstance(current_node, node_check): + return + if index_check is True and current_index is not None: + return + if (index_check is False or index_check is None) \ + and current_index is None: + return + if isinstance(index_check, basestring): + if not (isinstance(current_index, ScalarNode) + and index_check == current_index.value): + return + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return + return True + + def resolve(self, kind, value, implicit): + if kind is ScalarNode and implicit[0]: + if value == u'': + resolvers = self.yaml_implicit_resolvers.get(u'', []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if self.yaml_path_resolvers: + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + +class Resolver(BaseResolver): + pass + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:bool', + re.compile(ur'''^(?:yes|Yes|YES|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + 
list(u'yYnNtTfFoO')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:float', + re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? + |\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:int', + re.compile(ur'''^(?:[-+]?0b[0-1_]+ + |[-+]?0[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), + list(u'-+0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:merge', + re.compile(ur'^(?:<<)$'), + [u'<']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:null', + re.compile(ur'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + [u'~', u'n', u'N', u'']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:timestamp', + re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? + (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list(u'0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:value', + re.compile(ur'^(?:=)$'), + [u'=']) + +# The following resolver is only for documentation purposes. It cannot work +# because plain scalars cannot start with '!', '&', or '*'. 
+Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:yaml', + re.compile(ur'^(?:!|&|\*)$'), + list(u'!&*')) + diff --git a/scripts/clang-tidy/9.0.0/yaml/scanner.py b/scripts/clang-tidy/9.0.0/yaml/scanner.py new file mode 100644 index 000000000..834f662a4 --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/scanner.py @@ -0,0 +1,1453 @@ + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# Read comments in the Scanner code for more details. +# + +__all__ = ['Scanner', 'ScannerError'] + +from error import MarkedYAMLError +from tokens import * + +class ScannerError(MarkedYAMLError): + pass + +class SimpleKey(object): + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + +class Scanner(object): + + def __init__(self): + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer. + + # Had we reached the end of the stream? + self.done = False + + # The number of unclosed '{' and '['. `flow_level == 0` means block + # context. + self.flow_level = 0 + + # List of processed tokens that are not yet emitted. 
+ self.tokens = [] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. + self.possible_simple_keys = {} + + # Public methods. + + def check_token(self, *choices): + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # Return the next token, but do not delete if from the queue. 
+ while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + return self.tokens[0] + + def get_token(self): + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. + + def need_more_tokens(self): + if self.done: + return False + if not self.tokens: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + + def fetch_more_tokens(self): + + # Eat whitespaces and comments until we reach the next token. + self.scan_to_next_token() + + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.column) + + # Peek the next character. + ch = self.peek() + + # Is it the end of stream? + if ch == u'\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == u'%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == u'-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == u'.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + #if ch == u'\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == u'[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == u'{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == u']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? 
+ if ch == u'}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == u',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == u'-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == u'?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == u':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == u'*': + return self.fetch_alias() + + # Is it an anchor? + if ch == u'&': + return self.fetch_anchor() + + # Is it a tag? + if ch == u'!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == u'|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == u'>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == u'\'': + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == u'\"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError("while scanning for the next token", None, + "found character %r that cannot start any token" + % ch.encode('utf-8'), self.get_mark()) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. 
We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). + for level in self.possible_simple_keys.keys(): + key = self.possible_simple_keys[level] + if key.line != self.line \ + or self.index-key.index > 1024: + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # The next token may start a simple key. We check if it's possible + # and save its position. This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.column + + # The next token might be a simple key. Let's save it's number and + # position. + if self.allow_simple_key: + self.remove_possible_simple_key() + token_number = self.tokens_taken+len(self.tokens) + key = SimpleKey(token_number, required, + self.index, self.line, self.column, self.get_mark()) + self.possible_simple_keys[self.flow_level] = key + + def remove_possible_simple_key(self): + # Remove the saved possible key position at the current flow level. 
+ if self.flow_level in self.possible_simple_keys: + key = self.possible_simple_keys[self.flow_level] + + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + + del self.possible_simple_keys[self.flow_level] + + # Indentation functions. + + def unwind_indent(self, column): + + ## In flow context, tokens should respect indentation. + ## Actually the condition should be `self.indent >= column` according to + ## the spec. But this condition will prohibit intuitively correct + ## constructions such as + ## key : { + ## } + #if self.flow_level and self.indent > column: + # raise ScannerError(None, None, + # "invalid intendation or unclosed '[' or '{'", + # self.get_mark()) + + # In the flow context, indentation is ignored. We make the scanner less + # restrictive then specification requires. + if self.flow_level: + return + + # In block context, we may need to issue the BLOCK-END tokens. + while self.indent > column: + mark = self.get_mark() + self.indent = self.indents.pop() + self.tokens.append(BlockEndToken(mark, mark)) + + def add_indent(self, column): + # Check if we need to increase indentation. + if self.indent < column: + self.indents.append(self.indent) + self.indent = column + return True + return False + + # Fetchers. + + def fetch_stream_start(self): + # We always add STREAM-START as the first token and STREAM-END as the + # last token. + + # Read the token. + mark = self.get_mark() + + # Add STREAM-START. + self.tokens.append(StreamStartToken(mark, mark, + encoding=self.encoding)) + + + def fetch_stream_end(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + self.possible_simple_keys = {} + + # Read the token. + mark = self.get_mark() + + # Add STREAM-END. + self.tokens.append(StreamEndToken(mark, mark)) + + # The steam is finished. 
+ self.done = True + + def fetch_directive(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Scan and add DIRECTIVE. + self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END. + start_mark = self.get_mark() + self.forward(3) + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_start(self): + self.fetch_flow_collection_start(FlowSequenceStartToken) + + def fetch_flow_mapping_start(self): + self.fetch_flow_collection_start(FlowMappingStartToken) + + def fetch_flow_collection_start(self, TokenClass): + + # '[' and '{' may start a simple key. + self.save_possible_simple_key() + + # Increase the flow level. + self.flow_level += 1 + + # Simple keys are allowed after '[' and '{'. + self.allow_simple_key = True + + # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_end(self): + self.fetch_flow_collection_end(FlowSequenceEndToken) + + def fetch_flow_mapping_end(self): + self.fetch_flow_collection_end(FlowMappingEndToken) + + def fetch_flow_collection_end(self, TokenClass): + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Decrease the flow level. + self.flow_level -= 1 + + # No simple keys after ']' or '}'. 
+ self.allow_simple_key = False + + # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_entry(self): + + # Simple keys are allowed after ','. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add FLOW-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError(None, None, + "sequence entries are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-SEQUENCE-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + + # Simple keys are allowed after '-'. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not nessesary a simple)? + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping keys are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-MAPPING-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. 
+ self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert(key.token_number-self.tokens_taken, + KeyToken(key.mark, key.mark)) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert(key.token_number-self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark)) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be catched by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping values are not allowed here", + self.get_mark()) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. 
+ start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + + # ALIAS could be a simple key. + self.save_possible_simple_key() + + # No simple keys after ALIAS. + self.allow_simple_key = False + + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + + # ANCHOR could start a simple key. + self.save_possible_simple_key() + + # No simple keys after ANCHOR. + self.allow_simple_key = False + + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + + # TAG could start a simple key. + self.save_possible_simple_key() + + # No simple keys after TAG. + self.allow_simple_key = False + + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + + # A simple key may follow a block scalar. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + self.fetch_flow_scalar(style='\'') + + def fetch_double(self): + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + + # A flow scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after flow scalars. + self.allow_simple_key = False + + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + + # A plain scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + + # Scan and add SCALAR. 
May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.column == 0: + return True + + def check_document_start(self): + + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'---' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_document_end(self): + + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'...' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_block_entry(self): + + # BLOCK-ENTRY: '-' (' '|'\n') + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_key(self): + + # KEY(flow context): '?' + if self.flow_level: + return True + + # KEY(block context): '?' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_value(self): + + # VALUE(flow context): ':' + if self.flow_level: + return True + + # VALUE(block context): ':' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_plain(self): + + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. + # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + ch = self.peek() + return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ + or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029' + and (ch == u'-' or (not self.flow_level and ch in u'?:'))) + + # Scanners. + + def scan_to_next_token(self): + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. 
+ # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + + if self.index == 0 and self.peek() == u'\uFEFF': + self.forward() + found = False + while not found: + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + + def scan_directive(self): + # See the specification for details. + start_mark = self.get_mark() + self.forward() + name = self.scan_directive_name(start_mark) + value = None + if name == u'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.get_mark() + elif name == u'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.get_mark() + else: + end_mark = self.get_mark() + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # See the specification for details. 
+ length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return value + + def scan_yaml_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + major = self.scan_yaml_directive_number(start_mark) + if self.peek() != '.': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or '.', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + minor = self.scan_yaml_directive_number(start_mark) + if self.peek() not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or ' ', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + return (major, minor) + + def scan_yaml_directive_number(self, start_mark): + # See the specification for details. + ch = self.peek() + if not (u'0' <= ch <= u'9'): + raise ScannerError("while scanning a directive", start_mark, + "expected a digit, but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 0 + while u'0' <= self.peek(length) <= u'9': + length += 1 + value = int(self.prefix(length)) + self.forward(length) + return value + + def scan_tag_directive_value(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + handle = self.scan_tag_directive_handle(start_mark) + while self.peek() == u' ': + self.forward() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.peek() + if ch != u' ': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_tag_directive_prefix(self, start_mark): + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_directive_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpteted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. 
+ start_mark = self.get_mark() + indicator = self.peek() + if indicator == u'*': + name = 'alias' + else: + name = 'anchor' + self.forward() + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`': + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + end_mark = self.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # See the specification for details. + start_mark = self.get_mark() + ch = self.peek(1) + if ch == u'<': + handle = None + self.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if self.peek() != u'>': + raise ScannerError("while parsing a tag", start_mark, + "expected '>', but found %r" % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + elif ch in u'\0 \t\r\n\x85\u2028\u2029': + handle = None + suffix = u'!' + self.forward() + else: + length = 1 + use_handle = False + while ch not in u'\0 \r\n\x85\u2028\u2029': + if ch == u'!': + use_handle = True + break + length += 1 + ch = self.peek(length) + handle = u'!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = u'!' 
+ self.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a tag", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + value = (handle, suffix) + end_mark = self.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style): + # See the specification for details. + + if style == '>': + folded = True + else: + folded = False + + chunks = [] + start_mark = self.get_mark() + + # Scan the header. + self.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. + min_indent = self.indent+1 + if min_indent < 1: + min_indent = 1 + if increment is None: + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + indent = min_indent+increment-1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = u'' + + # Scan the inner part of the block scalar. + while self.column == indent and self.peek() != u'\0': + chunks.extend(breaks) + leading_non_space = self.peek() not in u' \t' + length = 0 + while self.peek(length) not in u'\0\r\n\x85\u2028\u2029': + length += 1 + chunks.append(self.prefix(length)) + self.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if self.column == indent and self.peek() != u'\0': + + # Unfortunately, folding rules are ambiguous. 
+ # + # This is the folding according to the specification: + + if folded and line_break == u'\n' \ + and leading_non_space and self.peek() not in u' \t': + if not breaks: + chunks.append(u' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + #if folded and line_break == u'\n': + # if not breaks: + # if self.peek() not in ' \t': + # chunks.append(u' ') + # else: + # chunks.append(line_break) + #else: + # chunks.append(line_break) + else: + break + + # Chomp the tail. + if chomping is not False: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + + # We are done. + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + def scan_block_scalar_indicators(self, start_mark): + # See the specification for details. + chomping = None + increment = None + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + elif ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected chomping or indentation indicators, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_block_scalar_indentation(self): + # See the specification for details. + chunks = [] + max_indent = 0 + end_mark = self.get_mark() + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() != u' ': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + else: + self.forward() + if self.column > max_indent: + max_indent = self.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # See the specification for details. + chunks = [] + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + while self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + return chunks, end_mark + + def scan_flow_scalar(self, style): + # See the specification for details. + # Note that we loose indentation rules for quoted scalars. Quoted + # scalars don't need to adhere indentation because " and ' clearly + # mark the beginning and the end of them. Therefore we are less + # restrictive then the specification requires. We only need to check + # that document separators are not included in scalars. 
+ if style == '"': + double = True + else: + double = False + chunks = [] + start_mark = self.get_mark() + quote = self.peek() + self.forward() + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + while self.peek() != quote: + chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + self.forward() + end_mark = self.get_mark() + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + ESCAPE_REPLACEMENTS = { + u'0': u'\0', + u'a': u'\x07', + u'b': u'\x08', + u't': u'\x09', + u'\t': u'\x09', + u'n': u'\x0A', + u'v': u'\x0B', + u'f': u'\x0C', + u'r': u'\x0D', + u'e': u'\x1B', + u' ': u'\x20', + u'\"': u'\"', + u'\\': u'\\', + u'N': u'\x85', + u'_': u'\xA0', + u'L': u'\u2028', + u'P': u'\u2029', + } + + ESCAPE_CODES = { + u'x': 2, + u'u': 4, + u'U': 8, + } + + def scan_flow_scalar_non_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + length = 0 + while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029': + length += 1 + if length: + chunks.append(self.prefix(length)) + self.forward(length) + ch = self.peek() + if not double and ch == u'\'' and self.peek(1) == u'\'': + chunks.append(u'\'') + self.forward(2) + elif (double and ch == u'\'') or (not double and ch in u'\"\\'): + chunks.append(ch) + self.forward() + elif double and ch == u'\\': + self.forward() + ch = self.peek() + if ch in self.ESCAPE_REPLACEMENTS: + chunks.append(self.ESCAPE_REPLACEMENTS[ch]) + self.forward() + elif ch in self.ESCAPE_CODES: + length = self.ESCAPE_CODES[ch] + self.forward() + for k in range(length): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "expected escape sequence of %d hexdecimal numbers, but found %r" % + (length, self.peek(k).encode('utf-8')), self.get_mark()) + code = int(self.prefix(length), 16) + 
chunks.append(unichr(code)) + self.forward(length) + elif ch in u'\r\n\x85\u2028\u2029': + self.scan_line_break() + chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) + else: + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark()) + else: + return chunks + + def scan_flow_scalar_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + length = 0 + while self.peek(length) in u' \t': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch == u'\0': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected end of stream", self.get_mark()) + elif ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + breaks = self.scan_flow_scalar_breaks(double, start_mark) + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + else: + chunks.append(whitespaces) + return chunks + + def scan_flow_scalar_breaks(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + # Instead of checking indentation, we check for document + # separators. + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected document separator", self.get_mark()) + while self.peek() in u' \t': + self.forward() + if self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + else: + return chunks + + def scan_plain(self): + # See the specification for details. + # We add an additional restriction for the flow context: + # plain scalars in the flow context cannot contain ',', ':' and '?'. + # We also keep track of the `allow_simple_key` flag here. + # Indentation rules are loosed for the flow context. 
+ chunks = [] + start_mark = self.get_mark() + end_mark = start_mark + indent = self.indent+1 + # We allow zero indentation for scalars, but then we need to check for + # document separators at the beginning of the line. + #if indent == 0: + # indent = 1 + spaces = [] + while True: + length = 0 + if self.peek() == u'#': + break + while True: + ch = self.peek(length) + if ch in u'\0 \t\r\n\x85\u2028\u2029' \ + or (not self.flow_level and ch == u':' and + self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \ + or (self.flow_level and ch in u',:?[]{}'): + break + length += 1 + # It's not clear what we should do with ':' in the flow context. + if (self.flow_level and ch == u':' + and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'): + self.forward(length) + raise ScannerError("while scanning a plain scalar", start_mark, + "found unexpected ':'", self.get_mark(), + "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.") + if length == 0: + break + self.allow_simple_key = False + chunks.extend(spaces) + chunks.append(self.prefix(length)) + self.forward(length) + end_mark = self.get_mark() + spaces = self.scan_plain_spaces(indent, start_mark) + if not spaces or self.peek() == u'#' \ + or (not self.flow_level and self.column < indent): + break + return ScalarToken(u''.join(chunks), True, start_mark, end_mark) + + def scan_plain_spaces(self, indent, start_mark): + # See the specification for details. + # The specification is really confusing about tabs in plain scalars. + # We just forbid them completely. Do not use tabs in YAML! 
+ chunks = [] + length = 0 + while self.peek(length) in u' ': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + self.allow_simple_key = True + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + breaks = [] + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() == ' ': + self.forward() + else: + breaks.append(self.scan_line_break()) + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + elif whitespaces: + chunks.append(whitespaces) + return chunks + + def scan_tag_handle(self, name, start_mark): + # See the specification for details. + # For some strange reasons, the specification does not allow '_' in + # tag handles. I have allowed it anyway. + ch = self.peek() + if ch != u'!': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 1 + ch = self.peek(length) + if ch != u' ': + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if ch != u'!': + self.forward(length) + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length += 1 + value = self.prefix(length) + self.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # See the specification for details. + # Note: we do not check if URI is well-formed. 
+ chunks = [] + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.!~*\'()[]%': + if ch == u'%': + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = self.peek(length) + if length: + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + if not chunks: + raise ScannerError("while parsing a %s" % name, start_mark, + "expected URI, but found %r" % ch.encode('utf-8'), + self.get_mark()) + return u''.join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # See the specification for details. + bytes = [] + mark = self.get_mark() + while self.peek() == u'%': + self.forward() + for k in range(2): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected URI escape sequence of 2 hexdecimal numbers, but found %r" % + (self.peek(k).encode('utf-8')), self.get_mark()) + bytes.append(chr(int(self.prefix(2), 16))) + self.forward(2) + try: + value = unicode(''.join(bytes), 'utf-8') + except UnicodeDecodeError, exc: + raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) + return value + + def scan_line_break(self): + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.peek() + if ch in u'\r\n\x85': + if self.prefix(2) == u'\r\n': + self.forward(2) + else: + self.forward() + return u'\n' + elif ch in u'\u2028\u2029': + self.forward() + return ch + return u'' + +#try: +# import psyco +# psyco.bind(Scanner) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/9.0.0/yaml/serializer.py b/scripts/clang-tidy/9.0.0/yaml/serializer.py new file mode 100644 index 000000000..0bf1e96dc --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/serializer.py @@ -0,0 +1,111 @@ + 
+__all__ = ['Serializer', 'SerializerError'] + +from error import YAMLError +from events import * +from nodes import * + +class SerializerError(YAMLError): + pass + +class Serializer(object): + + ANCHOR_TEMPLATE = u'id%03d' + + def __init__(self, encoding=None, + explicit_start=None, explicit_end=None, version=None, tags=None): + self.use_encoding = encoding + self.use_explicit_start = explicit_start + self.use_explicit_end = explicit_end + self.use_version = version + self.use_tags = tags + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + self.closed = None + + def open(self): + if self.closed is None: + self.emit(StreamStartEvent(encoding=self.use_encoding)) + self.closed = False + elif self.closed: + raise SerializerError("serializer is closed") + else: + raise SerializerError("serializer is already opened") + + def close(self): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif not self.closed: + self.emit(StreamEndEvent()) + self.closed = True + + #def __del__(self): + # self.close() + + def serialize(self, node): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif self.closed: + raise SerializerError("serializer is closed") + self.emit(DocumentStartEvent(explicit=self.use_explicit_start, + version=self.use_version, tags=self.use_tags)) + self.anchor_node(node) + self.serialize_node(node, None, None) + self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + + def anchor_node(self, node): + if node in self.anchors: + if self.anchors[node] is None: + self.anchors[node] = self.generate_anchor(node) + else: + self.anchors[node] = None + if isinstance(node, SequenceNode): + for item in node.value: + self.anchor_node(item) + elif isinstance(node, MappingNode): + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) + + def generate_anchor(self, node): + 
self.last_anchor_id += 1 + return self.ANCHOR_TEMPLATE % self.last_anchor_id + + def serialize_node(self, node, parent, index): + alias = self.anchors[node] + if node in self.serialized_nodes: + self.emit(AliasEvent(alias)) + else: + self.serialized_nodes[node] = True + self.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + detected_tag = self.resolve(ScalarNode, node.value, (True, False)) + default_tag = self.resolve(ScalarNode, node.value, (False, True)) + implicit = (node.tag == detected_tag), (node.tag == default_tag) + self.emit(ScalarEvent(alias, node.tag, implicit, node.value, + style=node.style)) + elif isinstance(node, SequenceNode): + implicit = (node.tag + == self.resolve(SequenceNode, node.value, True)) + self.emit(SequenceStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emit(SequenceEndEvent()) + elif isinstance(node, MappingNode): + implicit = (node.tag + == self.resolve(MappingNode, node.value, True)) + self.emit(MappingStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + self.emit(MappingEndEvent()) + self.ascend_resolver() + diff --git a/scripts/clang-tidy/9.0.0/yaml/tokens.py b/scripts/clang-tidy/9.0.0/yaml/tokens.py new file mode 100644 index 000000000..4d0b48a39 --- /dev/null +++ b/scripts/clang-tidy/9.0.0/yaml/tokens.py @@ -0,0 +1,104 @@ + +class Token(object): + def __init__(self, start_mark, end_mark): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in self.__dict__ + if not key.endswith('_mark')] + attributes.sort() + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +#class BOMToken(Token): +# id = '' + +class 
DirectiveToken(Token): + id = '' + def __init__(self, name, value, start_mark, end_mark): + self.name = name + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class DocumentStartToken(Token): + id = '' + +class DocumentEndToken(Token): + id = '' + +class StreamStartToken(Token): + id = '' + def __init__(self, start_mark=None, end_mark=None, + encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndToken(Token): + id = '' + +class BlockSequenceStartToken(Token): + id = '' + +class BlockMappingStartToken(Token): + id = '' + +class BlockEndToken(Token): + id = '' + +class FlowSequenceStartToken(Token): + id = '[' + +class FlowMappingStartToken(Token): + id = '{' + +class FlowSequenceEndToken(Token): + id = ']' + +class FlowMappingEndToken(Token): + id = '}' + +class KeyToken(Token): + id = '?' + +class ValueToken(Token): + id = ':' + +class BlockEntryToken(Token): + id = '-' + +class FlowEntryToken(Token): + id = ',' + +class AliasToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class AnchorToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class TagToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class ScalarToken(Token): + id = '' + def __init__(self, value, plain, start_mark, end_mark, style=None): + self.value = value + self.plain = plain + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + diff --git a/scripts/clang-tidy/9.0.1/.travis.yml b/scripts/clang-tidy/9.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/clang-tidy/9.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: 
+ include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/clang-tidy/9.0.1/README-yaml.md b/scripts/clang-tidy/9.0.1/README-yaml.md new file mode 100644 index 000000000..2cc738ab7 --- /dev/null +++ b/scripts/clang-tidy/9.0.1/README-yaml.md @@ -0,0 +1,13 @@ +This is a copy of `pyyaml-3.12` vendored on april 24, 2018 by @springmeyer. + +https://github.com/mapbox/mason/issues/563 documents why. + +The process to vendor was: + +``` +cd mason +pip install pyyaml --user +cp $(python -m site --user-site)/yaml scripts/clang-tidy/6.0.0/ +``` + +Then the `clang-tidy` package was built and the `yaml` directory was copied beside the `share/run-clang-tidy.py` script (which depends on it). \ No newline at end of file diff --git a/scripts/clang-tidy/9.0.1/script.sh b/scripts/clang-tidy/9.0.1/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/clang-tidy/9.0.1/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/clang-tidy/9.0.1/yaml/__init__.py b/scripts/clang-tidy/9.0.1/yaml/__init__.py new file mode 100644 index 000000000..87c15d38a --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/__init__.py @@ -0,0 +1,315 @@ + +from error import * + +from tokens import * +from events import * +from nodes import * + +from loader import * +from dumper import * + +__version__ = '3.12' + +try: + from cyaml import * + __with_libyaml__ = True +except ImportError: + __with_libyaml__ = False + +def scan(stream, Loader=Loader): + """ + Scan a YAML stream and produce scanning tokens. + """ + loader = Loader(stream) + try: + while loader.check_token(): + yield loader.get_token() + finally: + loader.dispose() + +def parse(stream, Loader=Loader): + """ + Parse a YAML stream and produce parsing events. + """ + loader = Loader(stream) + try: + while loader.check_event(): + yield loader.get_event() + finally: + loader.dispose() + +def compose(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + +def compose_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + loader = Loader(stream) + try: + while loader.check_node(): + yield loader.get_node() + finally: + loader.dispose() + +def load(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + loader = Loader(stream) + try: + return loader.get_single_data() + finally: + loader.dispose() + +def load_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. 
+ """ + loader = Loader(stream) + try: + while loader.check_data(): + yield loader.get_data() + finally: + loader.dispose() + +def safe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + return load(stream, SafeLoader) + +def safe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + return load_all(stream, SafeLoader) + +def emit(events, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + from StringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + try: + for event in events: + dumper.emit(event) + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize_all(nodes, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for node in nodes: + dumper.serialize(node) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + +def dump_all(documents, stream=None, Dumper=Dumper, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + from StringIO import StringIO + else: + from cStringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for data in documents: + dumper.represent(data) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def dump(data, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a Python object into a YAML stream. 
+ If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=Dumper, **kwds) + +def safe_dump_all(documents, stream=None, **kwds): + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + +def safe_dump(data, stream=None, **kwds): + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + +def add_implicit_resolver(tag, regexp, first=None, + Loader=Loader, Dumper=Dumper): + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + Loader.add_implicit_resolver(tag, regexp, first) + Dumper.add_implicit_resolver(tag, regexp, first) + +def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): + """ + Add a path based resolver for the given tag. + A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. + """ + Loader.add_path_resolver(tag, path, kind) + Dumper.add_path_resolver(tag, path, kind) + +def add_constructor(tag, constructor, Loader=Loader): + """ + Add a constructor for the given tag. + Constructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. + """ + Loader.add_constructor(tag, constructor) + +def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. 
+ Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. + """ + Loader.add_multi_constructor(tag_prefix, multi_constructor) + +def add_representer(data_type, representer, Dumper=Dumper): + """ + Add a representer for the given type. + Representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. + """ + Dumper.add_representer(data_type, representer) + +def add_multi_representer(data_type, multi_representer, Dumper=Dumper): + """ + Add a representer for the given type. + Multi-representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. + """ + Dumper.add_multi_representer(data_type, multi_representer) + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. + """ + def __init__(cls, name, bases, kwds): + super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + cls.yaml_dumper.add_representer(cls, cls.to_yaml) + +class YAMLObject(object): + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. + """ + + __metaclass__ = YAMLObjectMetaclass + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_loader = Loader + yaml_dumper = Dumper + + yaml_tag = None + yaml_flow_style = None + + def from_yaml(cls, loader, node): + """ + Convert a representation node to a Python object. + """ + return loader.construct_yaml_object(node, cls) + from_yaml = classmethod(from_yaml) + + def to_yaml(cls, dumper, data): + """ + Convert a Python object to a representation node. 
+ """ + return dumper.represent_yaml_object(cls.yaml_tag, data, cls, + flow_style=cls.yaml_flow_style) + to_yaml = classmethod(to_yaml) + diff --git a/scripts/clang-tidy/9.0.1/yaml/composer.py b/scripts/clang-tidy/9.0.1/yaml/composer.py new file mode 100644 index 000000000..06e5ac782 --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/composer.py @@ -0,0 +1,139 @@ + +__all__ = ['Composer', 'ComposerError'] + +from error import MarkedYAMLError +from events import * +from nodes import * + +class ComposerError(MarkedYAMLError): + pass + +class Composer(object): + + def __init__(self): + self.anchors = {} + + def check_node(self): + # Drop the STREAM-START event. + if self.check_event(StreamStartEvent): + self.get_event() + + # If there are more documents available? + return not self.check_event(StreamEndEvent) + + def get_node(self): + # Get the root node of the next document. + if not self.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # Drop the STREAM-START event. + self.get_event() + + # Compose a document if the stream is not empty. + document = None + if not self.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.check_event(StreamEndEvent): + event = self.get_event() + raise ComposerError("expected a single document in the stream", + document.start_mark, "but found another document", + event.start_mark) + + # Drop the STREAM-END event. + self.get_event() + + return document + + def compose_document(self): + # Drop the DOCUMENT-START event. + self.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. 
+ self.get_event() + + self.anchors = {} + return node + + def compose_node(self, parent, index): + if self.check_event(AliasEvent): + event = self.get_event() + anchor = event.anchor + if anchor not in self.anchors: + raise ComposerError(None, None, "found undefined alias %r" + % anchor.encode('utf-8'), event.start_mark) + return self.anchors[anchor] + event = self.peek_event() + anchor = event.anchor + if anchor is not None: + if anchor in self.anchors: + raise ComposerError("found duplicate anchor %r; first occurence" + % anchor.encode('utf-8'), self.anchors[anchor].start_mark, + "second occurence", event.start_mark) + self.descend_resolver(parent, index) + if self.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + event = self.get_event() + tag = event.tag + if tag is None or tag == u'!': + tag = self.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode(tag, event.value, + event.start_mark, event.end_mark, style=event.style) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + + def compose_mapping_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': 
+ tag = self.resolve(MappingNode, None, start_event.implicit) + node = MappingNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + while not self.check_event(MappingEndEvent): + #key_event = self.peek_event() + item_key = self.compose_node(node, None) + #if item_key in node.value: + # raise ComposerError("while composing a mapping", start_event.start_mark, + # "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + #node.value[item_key] = item_value + node.value.append((item_key, item_value)) + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + diff --git a/scripts/clang-tidy/9.0.1/yaml/constructor.py b/scripts/clang-tidy/9.0.1/yaml/constructor.py new file mode 100644 index 000000000..635faac3e --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/constructor.py @@ -0,0 +1,675 @@ + +__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', + 'ConstructorError'] + +from error import * +from nodes import * + +import datetime + +import binascii, re, sys, types + +class ConstructorError(MarkedYAMLError): + pass + +class BaseConstructor(object): + + yaml_constructors = {} + yaml_multi_constructors = {} + + def __init__(self): + self.constructed_objects = {} + self.recursive_objects = {} + self.state_generators = [] + self.deep_construct = False + + def check_data(self): + # If there are more documents available? + return self.check_node() + + def get_data(self): + # Construct and return the next document. + if self.check_node(): + return self.construct_document(self.get_node()) + + def get_single_data(self): + # Ensure that the stream contains a single document and construct it. 
+ node = self.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + data = self.construct_object(node) + while self.state_generators: + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + raise ConstructorError(None, None, + "found unconstructable recursive node", node.start_mark) + self.recursive_objects[node] = None + constructor = None + tag_suffix = None + if node.tag in self.yaml_constructors: + constructor = self.yaml_constructors[node.tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if node.tag.startswith(tag_prefix): + tag_suffix = node.tag[len(tag_prefix):] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = node.tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = generator.next() + if self.deep_construct: + for dummy in generator: + pass + else: + self.state_generators.append(generator) + self.constructed_objects[node] = data + del 
self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_scalar(self, node): + if not isinstance(node, ScalarNode): + raise ConstructorError(None, None, + "expected a scalar node, but found %s" % node.id, + node.start_mark) + return node.value + + def construct_sequence(self, node, deep=False): + if not isinstance(node, SequenceNode): + raise ConstructorError(None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + return [self.construct_object(child, deep=deep) + for child in node.value] + + def construct_mapping(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except TypeError, exc: + raise ConstructorError("while constructing a mapping", node.start_mark, + "found unacceptable key (%s)" % exc, key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + def construct_pairs(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + def add_constructor(cls, tag, constructor): + if not 'yaml_constructors' in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + add_constructor = classmethod(add_constructor) + + def add_multi_constructor(cls, tag_prefix, multi_constructor): + if not 'yaml_multi_constructors' in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + 
cls.yaml_multi_constructors[tag_prefix] = multi_constructor + add_multi_constructor = classmethod(add_multi_constructor) + +class SafeConstructor(BaseConstructor): + + def construct_scalar(self, node): + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + + def construct_yaml_null(self, node): + self.construct_scalar(node) + return None + + bool_values = { + u'yes': True, + u'no': False, + u'true': True, + u'false': False, + u'on': True, + u'off': False, + } + + def construct_yaml_bool(self, node): + value = 
self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '') + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '0': + return 0 + elif value.startswith('0b'): + return sign*int(value[2:], 2) + elif value.startswith('0x'): + return sign*int(value[2:], 16) + elif value[0] == '0': + return sign*int(value, 8) + elif ':' in value: + digits = [int(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*int(value) + + inf_value = 1e300 + while inf_value != inf_value*inf_value: + inf_value *= inf_value + nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). + + def construct_yaml_float(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '').lower() + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '.inf': + return sign*self.inf_value + elif value == '.nan': + return self.nan_value + elif ':' in value: + digits = [float(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*float(value) + + def construct_yaml_binary(self, node): + value = self.construct_scalar(node) + try: + return str(value).decode('base64') + except (binascii.Error, UnicodeEncodeError), exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + timestamp_regexp = re.compile( + ur'''^(?P[0-9][0-9][0-9][0-9]) + -(?P[0-9][0-9]?) + -(?P[0-9][0-9]?) + (?:(?:[Tt]|[ \t]+) + (?P[0-9][0-9]?) + :(?P[0-9][0-9]) + :(?P[0-9][0-9]) + (?:\.(?P[0-9]*))? + (?:[ \t]*(?PZ|(?P[-+])(?P[0-9][0-9]?) 
+ (?::(?P[0-9][0-9]))?))?)?$''', re.X) + + def construct_yaml_timestamp(self, node): + value = self.construct_scalar(node) + match = self.timestamp_regexp.match(node.value) + values = match.groupdict() + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + if not values['hour']: + return datetime.date(year, month, day) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction = values['fraction'][:6] + while len(fraction) < 6: + fraction += '0' + fraction = int(fraction) + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + tz_minute = int(values['tz_minute'] or 0) + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data + + def construct_yaml_omap(self, node): + # Note: we do not check for duplicate keys, because it's too + # CPU-expensive. + omap = [] + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + omap.append((key, value)) + + def construct_yaml_pairs(self, node): + # Note: the same code as `construct_yaml_omap`. 
+ pairs = [] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + data = set() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + value = self.construct_scalar(node) + try: + return value.encode('ascii') + except UnicodeEncodeError: + return value + + def construct_yaml_seq(self, node): + data = [] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + data = {} + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + raise ConstructorError(None, None, + "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'), + node.start_mark) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:null', + SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:bool', + SafeConstructor.construct_yaml_bool) + 
+SafeConstructor.add_constructor( + u'tag:yaml.org,2002:int', + SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:float', + SafeConstructor.construct_yaml_float) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:binary', + SafeConstructor.construct_yaml_binary) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:omap', + SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', + SafeConstructor.construct_yaml_pairs) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:set', + SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:str', + SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:seq', + SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:map', + SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, + SafeConstructor.construct_undefined) + +class Constructor(SafeConstructor): + + def construct_python_str(self, node): + return self.construct_scalar(node).encode('utf-8') + + def construct_python_unicode(self, node): + return self.construct_scalar(node) + + def construct_python_long(self, node): + return long(self.construct_yaml_int(node)) + + def construct_python_complex(self, node): + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + return tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python module", mark, + "expected non-empty name appended to the tag", mark) + try: + __import__(name) + except ImportError, exc: + raise ConstructorError("while constructing a Python module", mark, + "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark) + return 
sys.modules[name] + + def find_python_name(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python object", mark, + "expected non-empty name appended to the tag", mark) + if u'.' in name: + module_name, object_name = name.rsplit('.', 1) + else: + module_name = '__builtin__' + object_name = name + try: + __import__(module_name) + except ImportError, exc: + raise ConstructorError("while constructing a Python object", mark, + "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark) + module = sys.modules[module_name] + if not hasattr(module, object_name): + raise ConstructorError("while constructing a Python object", mark, + "cannot find %r in the module %r" % (object_name.encode('utf-8'), + module.__name__), mark) + return getattr(module, object_name) + + def construct_python_name(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python name", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python module", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_module(suffix, node.start_mark) + + class classobj: pass + + def make_python_instance(self, suffix, node, + args=None, kwds=None, newobj=False): + if not args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if newobj and isinstance(cls, type(self.classobj)) \ + and not args and not kwds: + instance = self.classobj() + instance.__class__ = cls + return instance + elif newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, 
instance, state): + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(object, key, value) + + def construct_python_object(self, suffix, node): + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. 
+ if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} + state = {} + listitems = [] + dictitems = {} + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if state: + self.set_python_instance_state(instance, state) + if listitems: + instance.extend(listitems) + if dictitems: + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + return self.construct_python_object_apply(suffix, node, newobj=True) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/none', + Constructor.construct_yaml_null) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/bool', + Constructor.construct_yaml_bool) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/str', + Constructor.construct_python_str) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/unicode', + Constructor.construct_python_unicode) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/int', + Constructor.construct_yaml_int) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/long', + Constructor.construct_python_long) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/float', + Constructor.construct_yaml_float) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/complex', + Constructor.construct_python_complex) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/list', + Constructor.construct_yaml_seq) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/tuple', + Constructor.construct_python_tuple) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/dict', + Constructor.construct_yaml_map) + +Constructor.add_multi_constructor( + 
u'tag:yaml.org,2002:python/name:', + Constructor.construct_python_name) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/module:', + Constructor.construct_python_module) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object:', + Constructor.construct_python_object) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/apply:', + Constructor.construct_python_object_apply) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/new:', + Constructor.construct_python_object_new) + diff --git a/scripts/clang-tidy/9.0.1/yaml/cyaml.py b/scripts/clang-tidy/9.0.1/yaml/cyaml.py new file mode 100644 index 000000000..68dcd7519 --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/cyaml.py @@ -0,0 +1,85 @@ + +__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', + 'CBaseDumper', 'CSafeDumper', 'CDumper'] + +from _yaml import CParser, CEmitter + +from constructor import * + +from serializer import * +from representer import * + +from resolver import * + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class CSafeLoader(CParser, SafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class CLoader(CParser, Constructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + Constructor.__init__(self) + Resolver.__init__(self) + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + 
allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class CDumper(CEmitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/9.0.1/yaml/dumper.py b/scripts/clang-tidy/9.0.1/yaml/dumper.py new file mode 100644 index 000000000..f811d2c91 --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/dumper.py @@ -0,0 +1,62 @@ + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] + +from emitter import * +from serializer import * +from representer import * +from resolver 
import * + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class Dumper(Emitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + 
version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/9.0.1/yaml/emitter.py b/scripts/clang-tidy/9.0.1/yaml/emitter.py new file mode 100644 index 000000000..e5bcdcccb --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/emitter.py @@ -0,0 +1,1140 @@ + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +__all__ = ['Emitter', 'EmitterError'] + +from error import YAMLError +from events import * + +class EmitterError(YAMLError): + pass + +class ScalarAnalysis(object): + def __init__(self, scalar, empty, multiline, + allow_flow_plain, allow_block_plain, + allow_single_quoted, allow_double_quoted, + allow_block): + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + +class Emitter(object): + + DEFAULT_TAG_PREFIXES = { + u'!' : u'!', + u'tag:yaml.org,2002:' : u'!!', + } + + def __init__(self, stream, canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + + # The stream should have the methods `write` and possibly `flush`. + self.stream = stream + + # Encoding can be overriden by STREAM-START. + self.encoding = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] + self.state = self.expect_stream_start + + # Current event and the event queue. + self.events = [] + self.event = None + + # The current indentation level and the stack of previous indents. 
+ self.indents = [] + self.indent = None + + # Flow level. + self.flow_level = 0 + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + + # Whether the document requires an explicit document indicator + self.open_ended = False + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + self.best_indent = 2 + if indent and 1 < indent < 10: + self.best_indent = indent + self.best_width = 80 + if width and width > self.best_indent*2: + self.best_width = width + self.best_line_break = u'\n' + if line_break in [u'\r', u'\n', u'\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None + + # Prepared anchor and tag. + self.prepared_anchor = None + self.prepared_tag = None + + # Scalar analysis and style. + self.analysis = None + self.style = None + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. 
+ + def need_more_events(self): + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return (len(self.events) < count+1) + + def increase_indent(self, flow=False, indentless=False): + self.indents.append(self.indent) + if self.indent is None: + if flow: + self.indent = self.best_indent + else: + self.indent = 0 + elif not indentless: + self.indent += self.best_indent + + # States. + + # Stream handlers. + + def expect_stream_start(self): + if isinstance(self.event, StreamStartEvent): + if self.event.encoding and not getattr(self.stream, 'encoding', None): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError("expected StreamStartEvent, but got %s" + % self.event) + + def expect_nothing(self): + raise EmitterError("expected nothing, but got %s" % self.event) + + # Document handlers. 
+ + def expect_first_document_start(self): + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = self.event.tags.keys() + handles.sort() + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = (first and not self.event.explicit and not self.canonical + and not self.event.version and not self.event.tags + and not self.check_empty_document()) + if not implicit: + self.write_indent() + self.write_indicator(u'---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError("expected DocumentStartEvent, but got %s" + % self.event) + + def expect_document_end(self): + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator(u'...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError("expected DocumentEndEvent, but got %s" + % self.event) + + def expect_document_root(self): + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. 
+ + def expect_node(self, root=False, sequence=False, mapping=False, + simple_key=False): + self.root_context = root + self.sequence_context = sequence + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + self.process_anchor(u'&') + self.process_tag() + if isinstance(self.event, ScalarEvent): + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_sequence(): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_mapping(): + self.expect_flow_mapping() + else: + self.expect_block_mapping() + else: + raise EmitterError("expected NodeEvent, but got %s" % self.event) + + def expect_alias(self): + if self.event.anchor is None: + raise EmitterError("anchor is not specified for alias") + self.process_anchor(u'*') + self.state = self.states.pop() + + def expect_scalar(self): + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
+ + def expect_flow_sequence(self): + self.write_indicator(u'[', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. 
+ + def expect_flow_mapping(self): + self.write_indicator(u'{', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(u':', True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. 
+ + def expect_block_sequence(self): + indentless = (self.mapping_context and not self.indention) + self.increase_indent(flow=False, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + if not first and isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + self.write_indicator(u'-', True, indention=True) + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. + + def expect_block_mapping(self): + self.increase_indent(flow=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + if not first and isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + if self.check_simple_key(): + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + self.write_indent() + self.write_indicator(u':', True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. 
+ + def check_empty_sequence(self): + return (isinstance(self.event, SequenceStartEvent) and self.events + and isinstance(self.events[0], SequenceEndEvent)) + + def check_empty_mapping(self): + return (isinstance(self.event, MappingStartEvent) and self.events + and isinstance(self.events[0], MappingEndEvent)) + + def check_empty_document(self): + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return (isinstance(event, ScalarEvent) and event.anchor is None + and event.tag is None and event.implicit and event.value == u'') + + def check_simple_key(self): + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ + and self.event.tag is not None: + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return (length < 128 and (isinstance(self.event, AliasEvent) + or (isinstance(self.event, ScalarEvent) + and not self.analysis.empty and not self.analysis.multiline) + or self.check_empty_sequence() or self.check_empty_mapping())) + + # Anchor, Tag, and Scalar processors. 
+ + def process_anchor(self, indicator): + if self.event.anchor is None: + self.prepared_anchor = None + return + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator+self.prepared_anchor, True) + self.prepared_anchor = None + + def process_tag(self): + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ((not self.canonical or tag is None) and + ((self.style == '' and self.event.implicit[0]) + or (self.style != '' and self.event.implicit[1]))): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = u'!' + self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError("tag is not specified") + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + self.prepared_tag = None + + def choose_scalar_style(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if not self.event.style and self.event.implicit[0]: + if (not (self.simple_key_context and + (self.analysis.empty or self.analysis.multiline)) + and (self.flow_level and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain))): + return '' + if self.event.style and self.event.style in '|>': + if (not self.flow_level and not self.simple_key_context + and self.analysis.allow_block): + return self.event.style + if not self.event.style or self.event.style == '\'': + if (self.analysis.allow_single_quoted and + not (self.simple_key_context and self.analysis.multiline)): + return '\'' + return '"' + + def process_scalar(self): + if self.analysis is None: + self.analysis 
= self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = (not self.simple_key_context) + #if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == '\'': + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + + # Analyzers. + + def prepare_version(self, version): + major, minor = version + if major != 1: + raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) + return u'%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + if not handle: + raise EmitterError("tag handle must not be empty") + if handle[0] != u'!' 
or handle[-1] != u'!': + raise EmitterError("tag handle must start and end with '!': %r" + % (handle.encode('utf-8'))) + for ch in handle[1:-1]: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the tag handle: %r" + % (ch.encode('utf-8'), handle.encode('utf-8'))) + return handle + + def prepare_tag_prefix(self, prefix): + if not prefix: + raise EmitterError("tag prefix must not be empty") + chunks = [] + start = end = 0 + if prefix[0] == u'!': + end = 1 + while end < len(prefix): + ch = prefix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?!:@&=+$,_.~*\'()[]': + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(prefix[start:end]) + return u''.join(chunks) + + def prepare_tag(self, tag): + if not tag: + raise EmitterError("tag must not be empty") + if tag == u'!': + return tag + handle = None + suffix = tag + prefixes = self.tag_prefixes.keys() + prefixes.sort() + for prefix in prefixes: + if tag.startswith(prefix) \ + and (prefix == u'!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix):] + chunks = [] + start = end = 0 + while end < len(suffix): + ch = suffix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.~*\'()[]' \ + or (ch == u'!' 
and handle != u'!'): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = u''.join(chunks) + if handle: + return u'%s%s' % (handle, suffix_text) + else: + return u'!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + if not anchor: + raise EmitterError("anchor must not be empty") + for ch in anchor: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the anchor: %r" + % (ch.encode('utf-8'), anchor.encode('utf-8'))) + return anchor + + def analyze_scalar(self, scalar): + + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, + allow_flow_plain=False, allow_block_plain=True, + allow_single_quoted=True, allow_double_quoted=True, + allow_block=False) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith(u'---') or scalar.startswith(u'...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceeded_by_whitespace = True + + # Last character or followed by a whitespace. + followed_by_whitespace = (len(scalar) == 1 or + scalar[1] in u'\0 \t\r\n\x85\u2028\u2029') + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. 
+ if ch in u'#,[]{}&*!|>\'\"%@`': + flow_indicators = True + block_indicators = True + if ch in u'?:': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in u',?[]{}': + flow_indicators = True + if ch == u':': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'#' and preceeded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in u'\n\x85\u2028\u2029': + line_breaks = True + if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): + if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': + unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. + if ch == u' ': + if index == 0: + leading_space = True + if index == len(scalar)-1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in u'\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar)-1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029') + followed_by_whitespace = (index+1 >= len(scalar) or + scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029') + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. 
+ if (leading_space or leading_break + or trailing_space or trailing_break): + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if space_break or special_characters: + allow_flow_plain = allow_block_plain = \ + allow_single_quoted = allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis(scalar=scalar, + empty=False, multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block) + + # Writers. + + def flush_stream(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # Write BOM if needed. 
+ if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write(u'\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, + whitespace=False, indention=False): + if self.whitespace or not need_whitespace: + data = indicator + else: + data = u' '+indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + indent = self.indent or 0 + if not self.indention or self.column > indent \ + or (self.column == indent and not self.whitespace): + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = u' '*(indent-self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + data = u'%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + data = u'%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. 
+ + def write_single_quoted(self, text, split=True): + self.write_indicator(u'\'', True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != u' ': + if start+1 == end and self.column > self.best_width and split \ + and start != 0 and end != len(text): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'': + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == u'\'': + data = u'\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + self.write_indicator(u'\'', False) + + ESCAPE_REPLACEMENTS = { + u'\0': u'0', + u'\x07': u'a', + u'\x08': u'b', + u'\x09': u't', + u'\x0A': u'n', + u'\x0B': u'v', + u'\x0C': u'f', + u'\x0D': u'r', + u'\x1B': u'e', + u'\"': u'\"', + u'\\': u'\\', + u'\x85': u'N', + u'\xA0': u'_', + u'\u2028': u'L', + u'\u2029': u'P', + } + + def write_double_quoted(self, text, split=True): + self.write_indicator(u'"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \ + or not (u'\x20' <= ch <= u'\x7E' + or (self.allow_unicode + and (u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD'))): + if 
start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = u'\\'+self.ESCAPE_REPLACEMENTS[ch] + elif ch <= u'\xFF': + data = u'\\x%02X' % ord(ch) + elif ch <= u'\uFFFF': + data = u'\\u%04X' % ord(ch) + else: + data = u'\\U%08X' % ord(ch) + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end+1 + if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \ + and self.column+(end-start) > self.best_width and split: + data = text[start:end]+u'\\' + if start < end: + start = end + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == u' ': + data = u'\\' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator(u'"', False) + + def determine_block_hints(self, text): + hints = u'' + if text: + if text[0] in u' \n\x85\u2028\u2029': + hints += unicode(self.best_indent) + if text[-1] not in u'\n\x85\u2028\u2029': + hints += u'-' + elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029': + hints += u'+' + return hints + + def write_folded(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'>'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if not leading_space and ch is not None and ch != u' ' \ + and text[start] == u'\n': + self.write_line_break() + leading_space = (ch == u' ') + for br in text[start:end]: + if br == u'\n': + 
self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + spaces = (ch == u' ') + end += 1 + + def write_literal(self, text): + hints = self.determine_block_hints(text) + self.write_indicator(u'|'+hints, True) + if hints[-1:] == u'+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + else: + if ch is None or ch in u'\n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + + def write_plain(self, text, split=True): + if self.root_context: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = u' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = 
text[end] + if spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + diff --git a/scripts/clang-tidy/9.0.1/yaml/error.py b/scripts/clang-tidy/9.0.1/yaml/error.py new file mode 100644 index 000000000..577686db5 --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/error.py @@ -0,0 +1,75 @@ + +__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] + +class Mark(object): + + def __init__(self, name, index, line, column, buffer, pointer): + self.name = name + self.index = index + self.line = line + self.column = column + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + if self.buffer is None: + return None + head = '' + start = self.pointer + while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer-start > max_length/2-1: + head = ' ... ' + start += 5 + break + tail = '' + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': + end += 1 + if end-self.pointer > max_length/2-1: + tail = ' ... 
' + end -= 5 + break + snippet = self.buffer[start:end].encode('utf-8') + return ' '*indent + head + snippet + tail + '\n' \ + + ' '*(indent+self.pointer-start+len(head)) + '^' + + def __str__(self): + snippet = self.get_snippet() + where = " in \"%s\", line %d, column %d" \ + % (self.name, self.line+1, self.column+1) + if snippet is not None: + where += ":\n"+snippet + return where + +class YAMLError(Exception): + pass + +class MarkedYAMLError(YAMLError): + + def __init__(self, context=None, context_mark=None, + problem=None, problem_mark=None, note=None): + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + + def __str__(self): + lines = [] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None \ + and (self.problem is None or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None: + lines.append(self.note) + return '\n'.join(lines) + diff --git a/scripts/clang-tidy/9.0.1/yaml/events.py b/scripts/clang-tidy/9.0.1/yaml/events.py new file mode 100644 index 000000000..f79ad389c --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/events.py @@ -0,0 +1,86 @@ + +# Abstract classes. 
+ +class Event(object): + def __init__(self, start_mark=None, end_mark=None): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] + if hasattr(self, key)] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +class NodeEvent(Event): + def __init__(self, anchor, start_mark=None, end_mark=None): + self.anchor = anchor + self.start_mark = start_mark + self.end_mark = end_mark + +class CollectionStartEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, + flow_style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class CollectionEndEvent(Event): + pass + +# Implementations. + +class StreamStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndEvent(Event): + pass + +class DocumentStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None, version=None, tags=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + self.version = version + self.tags = tags + +class DocumentEndEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + +class AliasEvent(NodeEvent): + pass + +class ScalarEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, value, + start_mark=None, end_mark=None, style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class SequenceStartEvent(CollectionStartEvent): + pass 
+ +class SequenceEndEvent(CollectionEndEvent): + pass + +class MappingStartEvent(CollectionStartEvent): + pass + +class MappingEndEvent(CollectionEndEvent): + pass + diff --git a/scripts/clang-tidy/9.0.1/yaml/loader.py b/scripts/clang-tidy/9.0.1/yaml/loader.py new file mode 100644 index 000000000..293ff467b --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/loader.py @@ -0,0 +1,40 @@ + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] + +from reader import * +from scanner import * +from parser import * +from composer import * +from constructor import * +from resolver import * + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) + diff --git a/scripts/clang-tidy/9.0.1/yaml/nodes.py b/scripts/clang-tidy/9.0.1/yaml/nodes.py new file mode 100644 index 000000000..c4f070c41 --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/nodes.py @@ -0,0 +1,49 @@ + +class Node(object): + def __init__(self, tag, value, start_mark, end_mark): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + value = self.value + #if isinstance(value, list): + # if len(value) == 0: + # value = '' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = 
'<%d items>' % len(value) + #else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... ') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + +class ScalarNode(Node): + id = 'scalar' + def __init__(self, tag, value, + start_mark=None, end_mark=None, style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class CollectionNode(Node): + def __init__(self, tag, value, + start_mark=None, end_mark=None, flow_style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class SequenceNode(CollectionNode): + id = 'sequence' + +class MappingNode(CollectionNode): + id = 'mapping' + diff --git a/scripts/clang-tidy/9.0.1/yaml/parser.py b/scripts/clang-tidy/9.0.1/yaml/parser.py new file mode 100644 index 000000000..f9e3057f3 --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/parser.py @@ -0,0 +1,589 @@ + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? 
+# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# +# FIRST sets: +# +# stream: { STREAM-START } +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR 
TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } + +__all__ = ['Parser', 'ParserError'] + +from error import MarkedYAMLError +from tokens import * +from events import * +from scanner import * + +class ParserError(MarkedYAMLError): + pass + +class Parser(object): + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + + DEFAULT_TAGS = { + u'!': u'!', + u'!!': u'tag:yaml.org,2002:', + } + + def __init__(self): + self.current_event = None + self.yaml_version = None + self.tag_handles = {} + self.states = [] + self.marks = [] + self.state = self.parse_stream_start + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def check_event(self, *choices): + # Check the type of the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # Get the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + return self.current_event + + def get_event(self): + # Get the next event and proceed further. + if self.current_event is None: + if self.state: + self.current_event = self.state() + value = self.current_event + self.current_event = None + return value + + # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END + # implicit_document ::= block_node DOCUMENT-END* + # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* + + def parse_stream_start(self): + + # Parse the stream start. + token = self.get_token() + event = StreamStartEvent(token.start_mark, token.end_mark, + encoding=token.encoding) + + # Prepare the next state. 
+ self.state = self.parse_implicit_document_start + + return event + + def parse_implicit_document_start(self): + + # Parse an implicit document. + if not self.check_token(DirectiveToken, DocumentStartToken, + StreamEndToken): + self.tag_handles = self.DEFAULT_TAGS + token = self.peek_token() + start_mark = end_mark = token.start_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=False) + + # Prepare the next state. + self.states.append(self.parse_document_end) + self.state = self.parse_block_node + + return event + + else: + return self.parse_document_start() + + def parse_document_start(self): + + # Parse any extra document end indicators. + while self.check_token(DocumentEndToken): + self.get_token() + + # Parse an explicit document. + if not self.check_token(StreamEndToken): + token = self.peek_token() + start_mark = token.start_mark + version, tags = self.process_directives() + if not self.check_token(DocumentStartToken): + raise ParserError(None, None, + "expected '', but found %r" + % self.peek_token().id, + self.peek_token().start_mark) + token = self.get_token() + end_mark = token.end_mark + event = DocumentStartEvent(start_mark, end_mark, + explicit=True, version=version, tags=tags) + self.states.append(self.parse_document_end) + self.state = self.parse_document_content + else: + # Parse the end of the stream. + token = self.get_token() + event = StreamEndEvent(token.start_mark, token.end_mark) + assert not self.states + assert not self.marks + self.state = None + return event + + def parse_document_end(self): + + # Parse the document end. + token = self.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.check_token(DocumentEndToken): + token = self.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, + explicit=explicit) + + # Prepare the next state. 
+ self.state = self.parse_document_start + + return event + + def parse_document_content(self): + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + event = self.process_empty_scalar(self.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == u'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == u'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle.encode('utf-8'), + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? 
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + if self.check_token(AliasToken): + token = self.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) + self.state = self.states.pop() + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle.encode('utf-8'), + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == u'!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.peek_token().start_mark + event = None + implicit = (tag is None or tag == u'!') + if indentless_sequence and 
self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + self.state = self.parse_indentless_sequence_entry + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == u'!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + self.state = self.states.pop() + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_sequence_first_entry + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_mapping_first_key + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_sequence_first_entry + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_mapping_first_key + elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. 
+ event = ScalarEvent(anchor, tag, (implicit, False), u'', + start_mark, end_mark) + self.state = self.states.pop() + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.peek_token() + raise ParserError("while parsing a %s node" % node, start_mark, + "expected the node content, but found %r" % token.id, + token.start_mark) + return event + + # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END + + def parse_block_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_sequence_entry() + + def parse_block_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, BlockEndToken): + self.states.append(self.parse_block_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_block_sequence_entry + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block collection", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ + + def parse_indentless_sequence_entry(self): + if self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, + KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_indentless_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_indentless_sequence_entry + return self.process_empty_scalar(token.end_mark) + token = self.peek_token() + event = SequenceEndEvent(token.start_mark, token.start_mark) + self.state = self.states.pop() + return event + + # block_mapping ::= BLOCK-MAPPING_START + # ((KEY 
block_node_or_indentless_sequence?)? + # (VALUE block_node_or_indentless_sequence?)?)* + # BLOCK-END + + def parse_block_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_block_mapping_key() + + def parse_block_mapping_key(self): + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_value) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while parsing a block mapping", self.marks[-1], + "expected , but found %r" % token.id, token.start_mark) + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_block_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_key) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_block_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + # flow_sequence ::= FLOW-SEQUENCE-START + # (flow_sequence_entry FLOW-ENTRY)* + # flow_sequence_entry? + # FLOW-SEQUENCE-END + # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + # + # Note that while production rules for both flow_sequence_entry and + # flow_mapping_entry are equal, their interpretations are different. + # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` + # generate an inline mapping (set syntax). 
+ + def parse_flow_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + if not self.check_token(FlowSequenceEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow sequence", self.marks[-1], + "expected ',' or ']', but got %r" % token.id, token.start_mark) + + if self.check_token(KeyToken): + token = self.peek_token() + event = MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_sequence_entry_mapping_end(self): + self.state = self.parse_flow_sequence_entry + token = self.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + + def parse_flow_mapping_first_key(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_mapping_key(first=True) + + def parse_flow_mapping_key(self, first=False): + if not self.check_token(FlowMappingEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow mapping", self.marks[-1], + "expected ',' or '}', but got %r" % token.id, token.start_mark) + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(token.end_mark) + elif not self.check_token(FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_empty_value) + return self.parse_flow_node() + token = self.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_key) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_mapping_key + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def 
parse_flow_mapping_empty_value(self): + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(self.peek_token().start_mark) + + def process_empty_scalar(self, mark): + return ScalarEvent(None, None, (True, False), u'', mark, mark) + diff --git a/scripts/clang-tidy/9.0.1/yaml/reader.py b/scripts/clang-tidy/9.0.1/yaml/reader.py new file mode 100644 index 000000000..3249e6b9f --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/reader.py @@ -0,0 +1,190 @@ +# This module contains abstractions for the input stream. You don't have to +# looks further, there are no pretty code. +# +# We define two classes here. +# +# Mark(source, line, column) +# It's just a record and its only use is producing nice error messages. +# Parser does not use it for any other purposes. +# +# Reader(source, data) +# Reader determines the encoding of `data` and converts it to unicode. +# Reader provides the following methods and attributes: +# reader.peek(length=1) - return the next `length` characters +# reader.forward(length=1) - move the current position to `length` characters. +# reader.index - the number of the current character. +# reader.line, stream.column - the line and the column of the current character. 
+ +__all__ = ['Reader', 'ReaderError'] + +from error import YAMLError, Mark + +import codecs, re + +class ReaderError(YAMLError): + + def __init__(self, name, position, character, encoding, reason): + self.name = name + self.character = character + self.position = position + self.encoding = encoding + self.reason = reason + + def __str__(self): + if isinstance(self.character, str): + return "'%s' codec can't decode byte #x%02x: %s\n" \ + " in \"%s\", position %d" \ + % (self.encoding, ord(self.character), self.reason, + self.name, self.position) + else: + return "unacceptable character #x%04x: %s\n" \ + " in \"%s\", position %d" \ + % (self.character, self.reason, + self.name, self.position) + +class Reader(object): + # Reader: + # - determines the data encoding and converts it to unicode, + # - checks if characters are in allowed range, + # - adds '\0' to the end. + + # Reader accepts + # - a `str` object, + # - a `unicode` object, + # - a file-like object with its `read` method returning `str`, + # - a file-like object with its `read` method returning `unicode`. + + # Yeah, it's ugly and slow. 
+ + def __init__(self, stream): + self.name = None + self.stream = None + self.stream_pointer = 0 + self.eof = True + self.buffer = u'' + self.pointer = 0 + self.raw_buffer = None + self.raw_decode = None + self.encoding = None + self.index = 0 + self.line = 0 + self.column = 0 + if isinstance(stream, unicode): + self.name = "" + self.check_printable(stream) + self.buffer = stream+u'\0' + elif isinstance(stream, str): + self.name = "" + self.raw_buffer = stream + self.determine_encoding() + else: + self.stream = stream + self.name = getattr(stream, 'name', "") + self.eof = False + self.raw_buffer = '' + self.determine_encoding() + + def peek(self, index=0): + try: + return self.buffer[self.pointer+index] + except IndexError: + self.update(index+1) + return self.buffer[self.pointer+index] + + def prefix(self, length=1): + if self.pointer+length >= len(self.buffer): + self.update(length) + return self.buffer[self.pointer:self.pointer+length] + + def forward(self, length=1): + if self.pointer+length+1 >= len(self.buffer): + self.update(length+1) + while length: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch in u'\n\x85\u2028\u2029' \ + or (ch == u'\r' and self.buffer[self.pointer] != u'\n'): + self.line += 1 + self.column = 0 + elif ch != u'\uFEFF': + self.column += 1 + length -= 1 + + def get_mark(self): + if self.stream is None: + return Mark(self.name, self.index, self.line, self.column, + self.buffer, self.pointer) + else: + return Mark(self.name, self.index, self.line, self.column, + None, None) + + def determine_encoding(self): + while not self.eof and len(self.raw_buffer) < 2: + self.update_raw() + if not isinstance(self.raw_buffer, unicode): + if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): + self.raw_decode = codecs.utf_16_le_decode + self.encoding = 'utf-16-le' + elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): + self.raw_decode = codecs.utf_16_be_decode + self.encoding = 'utf-16-be' + else: + self.raw_decode = 
codecs.utf_8_decode + self.encoding = 'utf-8' + self.update(1) + + NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') + def check_printable(self, data): + match = self.NON_PRINTABLE.search(data) + if match: + character = match.group() + position = self.index+(len(self.buffer)-self.pointer)+match.start() + raise ReaderError(self.name, position, ord(character), + 'unicode', "special characters are not allowed") + + def update(self, length): + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer:] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, + 'strict', self.eof) + except UnicodeDecodeError, exc: + character = exc.object[exc.start] + if self.stream is not None: + position = self.stream_pointer-len(self.raw_buffer)+exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, + exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += u'\0' + self.raw_buffer = None + break + + def update_raw(self, size=1024): + data = self.stream.read(size) + if data: + self.raw_buffer += data + self.stream_pointer += len(data) + else: + self.eof = True + +#try: +# import psyco +# psyco.bind(Reader) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/9.0.1/yaml/representer.py b/scripts/clang-tidy/9.0.1/yaml/representer.py new file mode 100644 index 000000000..4ea8cb1fe --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/representer.py @@ -0,0 +1,486 @@ + +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError'] + +from error import * +from nodes import * + +import datetime + +import sys, copy_reg, types + +class RepresenterError(YAMLError): + pass + +class 
BaseRepresenter(object): + + yaml_representers = {} + yaml_multi_representers = {} + + def __init__(self, default_style=None, default_flow_style=None): + self.default_style = default_style + self.default_flow_style = default_flow_style + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent(self, data): + node = self.represent_data(data) + self.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def get_classobj_bases(self, cls): + bases = [cls] + for base in cls.__bases__: + bases.extend(self.get_classobj_bases(base)) + return bases + + def represent_data(self, data): + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + #if node is None: + # raise RepresenterError("recursive objects are not allowed: %r" % data) + return node + #self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if type(data) is types.InstanceType: + data_types = self.get_classobj_bases(data.__class__)+list(data_types) + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, unicode(data)) + #if alias_key is not None: + # self.represented_objects[alias_key] = node + return node + + def add_representer(cls, data_type, representer): + if not 'yaml_representers' in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + 
cls.yaml_representers[data_type] = representer + add_representer = classmethod(add_representer) + + def add_multi_representer(cls, data_type, representer): + if not 'yaml_multi_representers' in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + add_multi_representer = classmethod(add_multi_representer) + + def represent_scalar(self, tag, value, style=None): + if style is None: + style = self.default_style + node = ScalarNode(tag, value, style=style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + value = [] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + value = [] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = mapping.items() + mapping.sort() + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + 
else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + return False + +class SafeRepresenter(BaseRepresenter): + + def ignore_aliases(self, data): + if data is None: + return True + if isinstance(data, tuple) and data == (): + return True + if isinstance(data, (str, unicode, bool, int, float)): + return True + + def represent_none(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:null', + u'null') + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_bool(self, data): + if data: + value = u'true' + else: + value = u'false' + return self.represent_scalar(u'tag:yaml.org,2002:bool', value) + + def represent_int(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + def represent_long(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value*inf_value): + inf_value *= inf_value + + def represent_float(self, data): + if data != data or (data == 0.0 and data == 1.0): + value = u'.nan' + elif data == self.inf_value: + value = u'.inf' + elif data == -self.inf_value: + value = u'-.inf' + else: + value = unicode(repr(data)).lower() + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag. We fix this by adding + # '.0' before the 'e' symbol. + if u'.' 
not in value and u'e' in value: + value = value.replace(u'e', u'.0e', 1) + return self.represent_scalar(u'tag:yaml.org,2002:float', value) + + def represent_list(self, data): + #pairs = (len(data) > 0 and isinstance(data, list)) + #if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + #if not pairs: + return self.represent_sequence(u'tag:yaml.org,2002:seq', data) + #value = [] + #for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + #return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + return self.represent_mapping(u'tag:yaml.org,2002:map', data) + + def represent_set(self, data): + value = {} + for key in data: + value[key] = None + return self.represent_mapping(u'tag:yaml.org,2002:set', value) + + def represent_date(self, data): + value = unicode(data.isoformat()) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + value = unicode(data.isoformat(' ')) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + raise RepresenterError("cannot represent an object: %s" % data) + +SafeRepresenter.add_representer(type(None), + SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, + SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(unicode, + SafeRepresenter.represent_unicode) + +SafeRepresenter.add_representer(bool, + SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, + SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(long, + SafeRepresenter.represent_long) + 
+SafeRepresenter.add_representer(float, + SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, + SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, + SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, + SafeRepresenter.represent_undefined) + +class Representer(SafeRepresenter): + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:python/str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + tag = None + try: + data.encode('ascii') + tag = u'tag:yaml.org,2002:python/unicode' + except UnicodeEncodeError: + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data) + + def represent_long(self, data): + tag = u'tag:yaml.org,2002:int' + if int(data) is not data: + tag = u'tag:yaml.org,2002:python/long' + return self.represent_scalar(tag, unicode(data)) + + def represent_complex(self, data): + if data.imag == 0.0: + data = u'%r' % data.real + elif data.real == 0.0: + data = u'%rj' % data.imag + elif data.imag > 0: + data = u'%r+%rj' % (data.real, data.imag) + else: + data = u'%r%rj' % (data.real, data.imag) + return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) + + def represent_tuple(self, data): + return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + name = u'%s.%s' % 
(data.__module__, data.__name__) + return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'') + + def represent_module(self, data): + return self.represent_scalar( + u'tag:yaml.org,2002:python/module:'+data.__name__, u'') + + def represent_instance(self, data): + # For instances of classic classes, we use __getinitargs__ and + # __getstate__ to serialize the data. + + # If data.__getinitargs__ exists, the object must be reconstructed by + # calling cls(**args), where args is a tuple returned by + # __getinitargs__. Otherwise, the cls.__init__ method should never be + # called and the class instance is created by instantiating a trivial + # class and assigning to the instance's __class__ variable. + + # If data.__getstate__ exists, it returns the state of the object. + # Otherwise, the state of the object is data.__dict__. + + # We produce either a !!python/object or !!python/object/new node. + # If data.__getinitargs__ does not exist and state is a dictionary, we + # produce a !!python/object node . Otherwise we produce a + # !!python/object/new node. + + cls = data.__class__ + class_name = u'%s.%s' % (cls.__module__, cls.__name__) + args = None + state = None + if hasattr(data, '__getinitargs__'): + args = list(data.__getinitargs__()) + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__ + if args is None and isinstance(state, dict): + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+class_name, state) + if isinstance(state, dict) and not state: + return self.represent_sequence( + u'tag:yaml.org,2002:python/object/new:'+class_name, args) + value = {} + if args: + value['args'] = args + value['state'] = state + return self.represent_mapping( + u'tag:yaml.org,2002:python/object/new:'+class_name, value) + + def represent_object(self, data): + # We use __reduce__ API to save the data. 
data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. + + cls = type(data) + if cls in copy_reg.dispatch_table: + reduce = copy_reg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError("cannot represent object: %r" % data) + reduce = (list(reduce)+[None]*5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = u'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = u'tag:yaml.org,2002:python/object/apply:' + newobj = False + function_name = u'%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems \ + and isinstance(state, dict) and newobj: + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+function_name, state) + if not listitems and not dictitems \ + and isinstance(state, dict) and not state: + return self.represent_sequence(tag+function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag+function_name, value) 
+ +Representer.add_representer(str, + Representer.represent_str) + +Representer.add_representer(unicode, + Representer.represent_unicode) + +Representer.add_representer(long, + Representer.represent_long) + +Representer.add_representer(complex, + Representer.represent_complex) + +Representer.add_representer(tuple, + Representer.represent_tuple) + +Representer.add_representer(type, + Representer.represent_name) + +Representer.add_representer(types.ClassType, + Representer.represent_name) + +Representer.add_representer(types.FunctionType, + Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, + Representer.represent_name) + +Representer.add_representer(types.ModuleType, + Representer.represent_module) + +Representer.add_multi_representer(types.InstanceType, + Representer.represent_instance) + +Representer.add_multi_representer(object, + Representer.represent_object) + diff --git a/scripts/clang-tidy/9.0.1/yaml/resolver.py b/scripts/clang-tidy/9.0.1/yaml/resolver.py new file mode 100644 index 000000000..528fbc0ea --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/resolver.py @@ -0,0 +1,227 @@ + +__all__ = ['BaseResolver', 'Resolver'] + +from error import * +from nodes import * + +import re + +class ResolverError(YAMLError): + pass + +class BaseResolver(object): + + DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} + yaml_path_resolvers = {} + + def __init__(self): + self.resolver_exact_paths = [] + self.resolver_prefix_paths = [] + + def add_implicit_resolver(cls, tag, regexp, first): + if not 'yaml_implicit_resolvers' in cls.__dict__: + implicit_resolvers = {} + for key in cls.yaml_implicit_resolvers: + implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:] + cls.yaml_implicit_resolvers = implicit_resolvers + if first is None: + first = [None] + for ch in first: + 
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + add_implicit_resolver = classmethod(add_implicit_resolver) + + def add_path_resolver(cls, tag, path, kind=None): + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. 
+ if not 'yaml_path_resolvers' in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError("Invalid path element: %s" % element) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ + and not isinstance(node_check, basestring) \ + and node_check is not None: + raise ResolverError("Invalid node checker: %s" % node_check) + if not isinstance(index_check, (basestring, int)) \ + and index_check is not None: + raise ResolverError("Invalid index checker: %s" % index_check) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] \ + and kind is not None: + raise ResolverError("Invalid node kind: %s" % kind) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + add_path_resolver = classmethod(add_path_resolver) + + def descend_resolver(self, current_node, current_index): + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, + current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + 
prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, + current_node, current_index): + node_check, index_check = path[depth-1] + if isinstance(node_check, basestring): + if current_node.tag != node_check: + return + elif node_check is not None: + if not isinstance(current_node, node_check): + return + if index_check is True and current_index is not None: + return + if (index_check is False or index_check is None) \ + and current_index is None: + return + if isinstance(index_check, basestring): + if not (isinstance(current_index, ScalarNode) + and index_check == current_index.value): + return + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return + return True + + def resolve(self, kind, value, implicit): + if kind is ScalarNode and implicit[0]: + if value == u'': + resolvers = self.yaml_implicit_resolvers.get(u'', []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if self.yaml_path_resolvers: + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + +class Resolver(BaseResolver): + pass + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:bool', + re.compile(ur'''^(?:yes|Yes|YES|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + 
list(u'yYnNtTfFoO')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:float', + re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? + |\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:int', + re.compile(ur'''^(?:[-+]?0b[0-1_]+ + |[-+]?0[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), + list(u'-+0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:merge', + re.compile(ur'^(?:<<)$'), + [u'<']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:null', + re.compile(ur'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + [u'~', u'n', u'N', u'']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:timestamp', + re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? + (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list(u'0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:value', + re.compile(ur'^(?:=)$'), + [u'=']) + +# The following resolver is only for documentation purposes. It cannot work +# because plain scalars cannot start with '!', '&', or '*'. 
+Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:yaml', + re.compile(ur'^(?:!|&|\*)$'), + list(u'!&*')) + diff --git a/scripts/clang-tidy/9.0.1/yaml/scanner.py b/scripts/clang-tidy/9.0.1/yaml/scanner.py new file mode 100644 index 000000000..834f662a4 --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/scanner.py @@ -0,0 +1,1453 @@ + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# Read comments in the Scanner code for more details. +# + +__all__ = ['Scanner', 'ScannerError'] + +from error import MarkedYAMLError +from tokens import * + +class ScannerError(MarkedYAMLError): + pass + +class SimpleKey(object): + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + +class Scanner(object): + + def __init__(self): + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer. + + # Had we reached the end of the stream? + self.done = False + + # The number of unclosed '{' and '['. `flow_level == 0` means block + # context. + self.flow_level = 0 + + # List of processed tokens that are not yet emitted. 
+ self.tokens = [] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. + self.possible_simple_keys = {} + + # Public methods. + + def check_token(self, *choices): + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # Return the next token, but do not delete if from the queue. 
+ while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + return self.tokens[0] + + def get_token(self): + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. + + def need_more_tokens(self): + if self.done: + return False + if not self.tokens: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + + def fetch_more_tokens(self): + + # Eat whitespaces and comments until we reach the next token. + self.scan_to_next_token() + + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.column) + + # Peek the next character. + ch = self.peek() + + # Is it the end of stream? + if ch == u'\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == u'%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == u'-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == u'.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + #if ch == u'\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == u'[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == u'{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == u']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? 
+ if ch == u'}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == u',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == u'-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == u'?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == u':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == u'*': + return self.fetch_alias() + + # Is it an anchor? + if ch == u'&': + return self.fetch_anchor() + + # Is it a tag? + if ch == u'!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == u'|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == u'>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == u'\'': + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == u'\"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError("while scanning for the next token", None, + "found character %r that cannot start any token" + % ch.encode('utf-8'), self.get_mark()) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. 
We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). + for level in self.possible_simple_keys.keys(): + key = self.possible_simple_keys[level] + if key.line != self.line \ + or self.index-key.index > 1024: + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # The next token may start a simple key. We check if it's possible + # and save its position. This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.column + + # The next token might be a simple key. Let's save it's number and + # position. + if self.allow_simple_key: + self.remove_possible_simple_key() + token_number = self.tokens_taken+len(self.tokens) + key = SimpleKey(token_number, required, + self.index, self.line, self.column, self.get_mark()) + self.possible_simple_keys[self.flow_level] = key + + def remove_possible_simple_key(self): + # Remove the saved possible key position at the current flow level. 
+ if self.flow_level in self.possible_simple_keys: + key = self.possible_simple_keys[self.flow_level] + + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not find expected ':'", self.get_mark()) + + del self.possible_simple_keys[self.flow_level] + + # Indentation functions. + + def unwind_indent(self, column): + + ## In flow context, tokens should respect indentation. + ## Actually the condition should be `self.indent >= column` according to + ## the spec. But this condition will prohibit intuitively correct + ## constructions such as + ## key : { + ## } + #if self.flow_level and self.indent > column: + # raise ScannerError(None, None, + # "invalid intendation or unclosed '[' or '{'", + # self.get_mark()) + + # In the flow context, indentation is ignored. We make the scanner less + # restrictive then specification requires. + if self.flow_level: + return + + # In block context, we may need to issue the BLOCK-END tokens. + while self.indent > column: + mark = self.get_mark() + self.indent = self.indents.pop() + self.tokens.append(BlockEndToken(mark, mark)) + + def add_indent(self, column): + # Check if we need to increase indentation. + if self.indent < column: + self.indents.append(self.indent) + self.indent = column + return True + return False + + # Fetchers. + + def fetch_stream_start(self): + # We always add STREAM-START as the first token and STREAM-END as the + # last token. + + # Read the token. + mark = self.get_mark() + + # Add STREAM-START. + self.tokens.append(StreamStartToken(mark, mark, + encoding=self.encoding)) + + + def fetch_stream_end(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + self.possible_simple_keys = {} + + # Read the token. + mark = self.get_mark() + + # Add STREAM-END. + self.tokens.append(StreamEndToken(mark, mark)) + + # The steam is finished. 
+ self.done = True + + def fetch_directive(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Scan and add DIRECTIVE. + self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END. + start_mark = self.get_mark() + self.forward(3) + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_start(self): + self.fetch_flow_collection_start(FlowSequenceStartToken) + + def fetch_flow_mapping_start(self): + self.fetch_flow_collection_start(FlowMappingStartToken) + + def fetch_flow_collection_start(self, TokenClass): + + # '[' and '{' may start a simple key. + self.save_possible_simple_key() + + # Increase the flow level. + self.flow_level += 1 + + # Simple keys are allowed after '[' and '{'. + self.allow_simple_key = True + + # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_end(self): + self.fetch_flow_collection_end(FlowSequenceEndToken) + + def fetch_flow_mapping_end(self): + self.fetch_flow_collection_end(FlowMappingEndToken) + + def fetch_flow_collection_end(self, TokenClass): + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Decrease the flow level. + self.flow_level -= 1 + + # No simple keys after ']' or '}'. 
+ self.allow_simple_key = False + + # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_entry(self): + + # Simple keys are allowed after ','. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add FLOW-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError(None, None, + "sequence entries are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-SEQUENCE-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + + # Simple keys are allowed after '-'. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not nessesary a simple)? + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping keys are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-MAPPING-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. 
+ self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert(key.token_number-self.tokens_taken, + KeyToken(key.mark, key.mark)) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert(key.token_number-self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark)) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be catched by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping values are not allowed here", + self.get_mark()) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. 
+ start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + + # ALIAS could be a simple key. + self.save_possible_simple_key() + + # No simple keys after ALIAS. + self.allow_simple_key = False + + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + + # ANCHOR could start a simple key. + self.save_possible_simple_key() + + # No simple keys after ANCHOR. + self.allow_simple_key = False + + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + + # TAG could start a simple key. + self.save_possible_simple_key() + + # No simple keys after TAG. + self.allow_simple_key = False + + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + + # A simple key may follow a block scalar. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + self.fetch_flow_scalar(style='\'') + + def fetch_double(self): + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + + # A flow scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after flow scalars. + self.allow_simple_key = False + + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + + # A plain scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + + # Scan and add SCALAR. 
May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.column == 0: + return True + + def check_document_start(self): + + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'---' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_document_end(self): + + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'...' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_block_entry(self): + + # BLOCK-ENTRY: '-' (' '|'\n') + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_key(self): + + # KEY(flow context): '?' + if self.flow_level: + return True + + # KEY(block context): '?' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_value(self): + + # VALUE(flow context): ':' + if self.flow_level: + return True + + # VALUE(block context): ':' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_plain(self): + + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. + # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + ch = self.peek() + return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ + or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029' + and (ch == u'-' or (not self.flow_level and ch in u'?:'))) + + # Scanners. + + def scan_to_next_token(self): + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. 
+ # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + + if self.index == 0 and self.peek() == u'\uFEFF': + self.forward() + found = False + while not found: + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + + def scan_directive(self): + # See the specification for details. + start_mark = self.get_mark() + self.forward() + name = self.scan_directive_name(start_mark) + value = None + if name == u'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.get_mark() + elif name == u'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.get_mark() + else: + end_mark = self.get_mark() + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # See the specification for details. 
+ length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return value + + def scan_yaml_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + major = self.scan_yaml_directive_number(start_mark) + if self.peek() != '.': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or '.', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + minor = self.scan_yaml_directive_number(start_mark) + if self.peek() not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or ' ', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + return (major, minor) + + def scan_yaml_directive_number(self, start_mark): + # See the specification for details. + ch = self.peek() + if not (u'0' <= ch <= u'9'): + raise ScannerError("while scanning a directive", start_mark, + "expected a digit, but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 0 + while u'0' <= self.peek(length) <= u'9': + length += 1 + value = int(self.prefix(length)) + self.forward(length) + return value + + def scan_tag_directive_value(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + handle = self.scan_tag_directive_handle(start_mark) + while self.peek() == u' ': + self.forward() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.peek() + if ch != u' ': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_tag_directive_prefix(self, start_mark): + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_directive_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpteted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. 
+ start_mark = self.get_mark() + indicator = self.peek() + if indicator == u'*': + name = 'alias' + else: + name = 'anchor' + self.forward() + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`': + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + end_mark = self.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # See the specification for details. + start_mark = self.get_mark() + ch = self.peek(1) + if ch == u'<': + handle = None + self.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if self.peek() != u'>': + raise ScannerError("while parsing a tag", start_mark, + "expected '>', but found %r" % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + elif ch in u'\0 \t\r\n\x85\u2028\u2029': + handle = None + suffix = u'!' + self.forward() + else: + length = 1 + use_handle = False + while ch not in u'\0 \r\n\x85\u2028\u2029': + if ch == u'!': + use_handle = True + break + length += 1 + ch = self.peek(length) + handle = u'!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = u'!' 
+ self.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a tag", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + value = (handle, suffix) + end_mark = self.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style): + # See the specification for details. + + if style == '>': + folded = True + else: + folded = False + + chunks = [] + start_mark = self.get_mark() + + # Scan the header. + self.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. + min_indent = self.indent+1 + if min_indent < 1: + min_indent = 1 + if increment is None: + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + indent = min_indent+increment-1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = u'' + + # Scan the inner part of the block scalar. + while self.column == indent and self.peek() != u'\0': + chunks.extend(breaks) + leading_non_space = self.peek() not in u' \t' + length = 0 + while self.peek(length) not in u'\0\r\n\x85\u2028\u2029': + length += 1 + chunks.append(self.prefix(length)) + self.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if self.column == indent and self.peek() != u'\0': + + # Unfortunately, folding rules are ambiguous. 
+ # + # This is the folding according to the specification: + + if folded and line_break == u'\n' \ + and leading_non_space and self.peek() not in u' \t': + if not breaks: + chunks.append(u' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + #if folded and line_break == u'\n': + # if not breaks: + # if self.peek() not in ' \t': + # chunks.append(u' ') + # else: + # chunks.append(line_break) + #else: + # chunks.append(line_break) + else: + break + + # Chomp the tail. + if chomping is not False: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + + # We are done. + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + def scan_block_scalar_indicators(self, start_mark): + # See the specification for details. + chomping = None + increment = None + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + elif ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected chomping or indentation indicators, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_block_scalar_indentation(self): + # See the specification for details. + chunks = [] + max_indent = 0 + end_mark = self.get_mark() + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() != u' ': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + else: + self.forward() + if self.column > max_indent: + max_indent = self.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # See the specification for details. + chunks = [] + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + while self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + while self.column < indent and self.peek() == u' ': + self.forward() + return chunks, end_mark + + def scan_flow_scalar(self, style): + # See the specification for details. + # Note that we loose indentation rules for quoted scalars. Quoted + # scalars don't need to adhere indentation because " and ' clearly + # mark the beginning and the end of them. Therefore we are less + # restrictive then the specification requires. We only need to check + # that document separators are not included in scalars. 
+ if style == '"': + double = True + else: + double = False + chunks = [] + start_mark = self.get_mark() + quote = self.peek() + self.forward() + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + while self.peek() != quote: + chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + self.forward() + end_mark = self.get_mark() + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + ESCAPE_REPLACEMENTS = { + u'0': u'\0', + u'a': u'\x07', + u'b': u'\x08', + u't': u'\x09', + u'\t': u'\x09', + u'n': u'\x0A', + u'v': u'\x0B', + u'f': u'\x0C', + u'r': u'\x0D', + u'e': u'\x1B', + u' ': u'\x20', + u'\"': u'\"', + u'\\': u'\\', + u'N': u'\x85', + u'_': u'\xA0', + u'L': u'\u2028', + u'P': u'\u2029', + } + + ESCAPE_CODES = { + u'x': 2, + u'u': 4, + u'U': 8, + } + + def scan_flow_scalar_non_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + length = 0 + while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029': + length += 1 + if length: + chunks.append(self.prefix(length)) + self.forward(length) + ch = self.peek() + if not double and ch == u'\'' and self.peek(1) == u'\'': + chunks.append(u'\'') + self.forward(2) + elif (double and ch == u'\'') or (not double and ch in u'\"\\'): + chunks.append(ch) + self.forward() + elif double and ch == u'\\': + self.forward() + ch = self.peek() + if ch in self.ESCAPE_REPLACEMENTS: + chunks.append(self.ESCAPE_REPLACEMENTS[ch]) + self.forward() + elif ch in self.ESCAPE_CODES: + length = self.ESCAPE_CODES[ch] + self.forward() + for k in range(length): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "expected escape sequence of %d hexdecimal numbers, but found %r" % + (length, self.peek(k).encode('utf-8')), self.get_mark()) + code = int(self.prefix(length), 16) + 
chunks.append(unichr(code)) + self.forward(length) + elif ch in u'\r\n\x85\u2028\u2029': + self.scan_line_break() + chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) + else: + raise ScannerError("while scanning a double-quoted scalar", start_mark, + "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark()) + else: + return chunks + + def scan_flow_scalar_spaces(self, double, start_mark): + # See the specification for details. + chunks = [] + length = 0 + while self.peek(length) in u' \t': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch == u'\0': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected end of stream", self.get_mark()) + elif ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + breaks = self.scan_flow_scalar_breaks(double, start_mark) + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + else: + chunks.append(whitespaces) + return chunks + + def scan_flow_scalar_breaks(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + # Instead of checking indentation, we check for document + # separators. + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected document separator", self.get_mark()) + while self.peek() in u' \t': + self.forward() + if self.peek() in u'\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + else: + return chunks + + def scan_plain(self): + # See the specification for details. + # We add an additional restriction for the flow context: + # plain scalars in the flow context cannot contain ',', ':' and '?'. + # We also keep track of the `allow_simple_key` flag here. + # Indentation rules are loosed for the flow context. 
+ chunks = [] + start_mark = self.get_mark() + end_mark = start_mark + indent = self.indent+1 + # We allow zero indentation for scalars, but then we need to check for + # document separators at the beginning of the line. + #if indent == 0: + # indent = 1 + spaces = [] + while True: + length = 0 + if self.peek() == u'#': + break + while True: + ch = self.peek(length) + if ch in u'\0 \t\r\n\x85\u2028\u2029' \ + or (not self.flow_level and ch == u':' and + self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \ + or (self.flow_level and ch in u',:?[]{}'): + break + length += 1 + # It's not clear what we should do with ':' in the flow context. + if (self.flow_level and ch == u':' + and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'): + self.forward(length) + raise ScannerError("while scanning a plain scalar", start_mark, + "found unexpected ':'", self.get_mark(), + "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.") + if length == 0: + break + self.allow_simple_key = False + chunks.extend(spaces) + chunks.append(self.prefix(length)) + self.forward(length) + end_mark = self.get_mark() + spaces = self.scan_plain_spaces(indent, start_mark) + if not spaces or self.peek() == u'#' \ + or (not self.flow_level and self.column < indent): + break + return ScalarToken(u''.join(chunks), True, start_mark, end_mark) + + def scan_plain_spaces(self, indent, start_mark): + # See the specification for details. + # The specification is really confusing about tabs in plain scalars. + # We just forbid them completely. Do not use tabs in YAML! 
+ chunks = [] + length = 0 + while self.peek(length) in u' ': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + self.allow_simple_key = True + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + breaks = [] + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() == ' ': + self.forward() + else: + breaks.append(self.scan_line_break()) + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + elif whitespaces: + chunks.append(whitespaces) + return chunks + + def scan_tag_handle(self, name, start_mark): + # See the specification for details. + # For some strange reasons, the specification does not allow '_' in + # tag handles. I have allowed it anyway. + ch = self.peek() + if ch != u'!': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 1 + ch = self.peek(length) + if ch != u' ': + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if ch != u'!': + self.forward(length) + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length += 1 + value = self.prefix(length) + self.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # See the specification for details. + # Note: we do not check if URI is well-formed. 
+ chunks = [] + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + or ch in u'-;/?:@&=+$,_.!~*\'()[]%': + if ch == u'%': + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = self.peek(length) + if length: + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + if not chunks: + raise ScannerError("while parsing a %s" % name, start_mark, + "expected URI, but found %r" % ch.encode('utf-8'), + self.get_mark()) + return u''.join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # See the specification for details. + bytes = [] + mark = self.get_mark() + while self.peek() == u'%': + self.forward() + for k in range(2): + if self.peek(k) not in u'0123456789ABCDEFabcdef': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected URI escape sequence of 2 hexdecimal numbers, but found %r" % + (self.peek(k).encode('utf-8')), self.get_mark()) + bytes.append(chr(int(self.prefix(2), 16))) + self.forward(2) + try: + value = unicode(''.join(bytes), 'utf-8') + except UnicodeDecodeError, exc: + raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) + return value + + def scan_line_break(self): + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.peek() + if ch in u'\r\n\x85': + if self.prefix(2) == u'\r\n': + self.forward(2) + else: + self.forward() + return u'\n' + elif ch in u'\u2028\u2029': + self.forward() + return ch + return u'' + +#try: +# import psyco +# psyco.bind(Scanner) +#except ImportError: +# pass + diff --git a/scripts/clang-tidy/9.0.1/yaml/serializer.py b/scripts/clang-tidy/9.0.1/yaml/serializer.py new file mode 100644 index 000000000..0bf1e96dc --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/serializer.py @@ -0,0 +1,111 @@ + 
+__all__ = ['Serializer', 'SerializerError'] + +from error import YAMLError +from events import * +from nodes import * + +class SerializerError(YAMLError): + pass + +class Serializer(object): + + ANCHOR_TEMPLATE = u'id%03d' + + def __init__(self, encoding=None, + explicit_start=None, explicit_end=None, version=None, tags=None): + self.use_encoding = encoding + self.use_explicit_start = explicit_start + self.use_explicit_end = explicit_end + self.use_version = version + self.use_tags = tags + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + self.closed = None + + def open(self): + if self.closed is None: + self.emit(StreamStartEvent(encoding=self.use_encoding)) + self.closed = False + elif self.closed: + raise SerializerError("serializer is closed") + else: + raise SerializerError("serializer is already opened") + + def close(self): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif not self.closed: + self.emit(StreamEndEvent()) + self.closed = True + + #def __del__(self): + # self.close() + + def serialize(self, node): + if self.closed is None: + raise SerializerError("serializer is not opened") + elif self.closed: + raise SerializerError("serializer is closed") + self.emit(DocumentStartEvent(explicit=self.use_explicit_start, + version=self.use_version, tags=self.use_tags)) + self.anchor_node(node) + self.serialize_node(node, None, None) + self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + + def anchor_node(self, node): + if node in self.anchors: + if self.anchors[node] is None: + self.anchors[node] = self.generate_anchor(node) + else: + self.anchors[node] = None + if isinstance(node, SequenceNode): + for item in node.value: + self.anchor_node(item) + elif isinstance(node, MappingNode): + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) + + def generate_anchor(self, node): + 
self.last_anchor_id += 1 + return self.ANCHOR_TEMPLATE % self.last_anchor_id + + def serialize_node(self, node, parent, index): + alias = self.anchors[node] + if node in self.serialized_nodes: + self.emit(AliasEvent(alias)) + else: + self.serialized_nodes[node] = True + self.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + detected_tag = self.resolve(ScalarNode, node.value, (True, False)) + default_tag = self.resolve(ScalarNode, node.value, (False, True)) + implicit = (node.tag == detected_tag), (node.tag == default_tag) + self.emit(ScalarEvent(alias, node.tag, implicit, node.value, + style=node.style)) + elif isinstance(node, SequenceNode): + implicit = (node.tag + == self.resolve(SequenceNode, node.value, True)) + self.emit(SequenceStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emit(SequenceEndEvent()) + elif isinstance(node, MappingNode): + implicit = (node.tag + == self.resolve(MappingNode, node.value, True)) + self.emit(MappingStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + self.emit(MappingEndEvent()) + self.ascend_resolver() + diff --git a/scripts/clang-tidy/9.0.1/yaml/tokens.py b/scripts/clang-tidy/9.0.1/yaml/tokens.py new file mode 100644 index 000000000..4d0b48a39 --- /dev/null +++ b/scripts/clang-tidy/9.0.1/yaml/tokens.py @@ -0,0 +1,104 @@ + +class Token(object): + def __init__(self, start_mark, end_mark): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in self.__dict__ + if not key.endswith('_mark')] + attributes.sort() + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +#class BOMToken(Token): +# id = '' + +class 
DirectiveToken(Token): + id = '' + def __init__(self, name, value, start_mark, end_mark): + self.name = name + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class DocumentStartToken(Token): + id = '' + +class DocumentEndToken(Token): + id = '' + +class StreamStartToken(Token): + id = '' + def __init__(self, start_mark=None, end_mark=None, + encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndToken(Token): + id = '' + +class BlockSequenceStartToken(Token): + id = '' + +class BlockMappingStartToken(Token): + id = '' + +class BlockEndToken(Token): + id = '' + +class FlowSequenceStartToken(Token): + id = '[' + +class FlowMappingStartToken(Token): + id = '{' + +class FlowSequenceEndToken(Token): + id = ']' + +class FlowMappingEndToken(Token): + id = '}' + +class KeyToken(Token): + id = '?' + +class ValueToken(Token): + id = ':' + +class BlockEntryToken(Token): + id = '-' + +class FlowEntryToken(Token): + id = ',' + +class AliasToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class AnchorToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class TagToken(Token): + id = '' + def __init__(self, value, start_mark, end_mark): + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class ScalarToken(Token): + id = '' + def __init__(self, value, plain, start_mark, end_mark, style=None): + self.value = value + self.plain = plain + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + diff --git a/scripts/clang-tidy/base/common.sh b/scripts/clang-tidy/base/common.sh index ea7ead2e4..d9e3e0abb 100755 --- a/scripts/clang-tidy/base/common.sh +++ b/scripts/clang-tidy/base/common.sh @@ -27,6 +27,9 @@ function mason_build { cp 
-r "${CLANG_PREFIX}/share/clang/run-clang-tidy.py" "${MASON_PREFIX}/share/" cp -r "${CLANG_PREFIX}/share/clang/clang-tidy-diff.py" "${MASON_PREFIX}/share/" cp -r "${CLANG_PREFIX}/share/clang/run-find-all-symbols.py" "${MASON_PREFIX}/share/" + if [[ -d "${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/yaml" ]]; then + cp -r "${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/yaml" "${MASON_PREFIX}/share/" + fi } diff --git a/scripts/cmake/3.15.2/.travis.yml b/scripts/cmake/3.15.2/.travis.yml new file mode 100644 index 000000000..7b658b66c --- /dev/null +++ b/scripts/cmake/3.15.2/.travis.yml @@ -0,0 +1,18 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + compiler: clang + sudo: false + addons: + apt: + sources: [ 'ubuntu-toolchain-r-test' ] + packages: [ 'libstdc++-4.9-dev' ] + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/cmake/3.15.2/script.sh b/scripts/cmake/3.15.2/script.sh new file mode 100755 index 000000000..b433853ff --- /dev/null +++ b/scripts/cmake/3.15.2/script.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +MASON_NAME=cmake +MASON_VERSION=3.15.2 +MASON_LIB_FILE=bin/cmake + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://www.cmake.org/files/v3.15/cmake-${MASON_VERSION}.tar.gz \ + 7427ef92a046c97304c9f529d44c4b4707440c6c + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_prepare_compile { + ${MASON_DIR}/mason install ccache 3.3.4 + export PATH=$(${MASON_DIR}/mason prefix ccache 3.3.4)/bin:${PATH} +} +function mason_compile { + # Add optimization flags since CFLAGS overrides the default (-g -O2) + export CFLAGS="${CFLAGS} -O3 -DNDEBUG" + export CXXFLAGS="${CXXFLAGS} -O3 -DNDEBUG" + # TODO - use mason deps + ./configure --prefix=${MASON_PREFIX} \ + --no-system-libs \ + --parallel=${MASON_CONCURRENCY} \ + --enable-ccache + make -j${MASON_CONCURRENCY} VERBOSE=1 + make install + # remove non-essential things to save on package size + rm -f ${MASON_PREFIX}/bin/ccmake + rm -f ${MASON_PREFIX}/bin/cmakexbuild + rm -f ${MASON_PREFIX}/bin/cpack + rm -f ${MASON_PREFIX}/bin/ctest + rm -rf ${MASON_PREFIX}/share/cmake-*/Help + ls -lh ${MASON_PREFIX}/bin/ +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/cmake/3.18.1/.travis.yml b/scripts/cmake/3.18.1/.travis.yml new file mode 100644 index 000000000..06ee076c0 --- /dev/null +++ b/scripts/cmake/3.18.1/.travis.yml @@ -0,0 +1,15 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11.6 + - os: linux + addons: + apt: + sources: [ 'ubuntu-toolchain-r-test' ] + packages: [ 'libstdc++-4.9-dev' ] + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/cmake/3.18.1/script.sh b/scripts/cmake/3.18.1/script.sh new file mode 100755 index 000000000..56dc2d38c --- /dev/null +++ b/scripts/cmake/3.18.1/script.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +MASON_NAME=cmake +MASON_VERSION=3.18.1 +MASON_LIB_FILE=bin/cmake 
+ +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/Kitware/CMake/releases/download/v${MASON_VERSION}/cmake-${MASON_VERSION}.tar.gz \ + 332c23cdda5f4acb8e1ea4bbc5bfaf599d6adc23 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_prepare_compile { + ${MASON_DIR}/mason install ccache 3.7.2 + export PATH=$(${MASON_DIR}/mason prefix ccache 3.7.2)/bin:${PATH} +} +function mason_compile { + # Add optimization flags since CFLAGS overrides the default (-g -O2) + export CFLAGS="${CFLAGS} -O3 -DNDEBUG" + export CXXFLAGS="${CXXFLAGS} -O3 -DNDEBUG" + # TODO - use mason deps + ./configure --prefix=${MASON_PREFIX} \ + --no-system-libs \ + --parallel=${MASON_CONCURRENCY} \ + --enable-ccache + make -j${MASON_CONCURRENCY} VERBOSE=1 + make install + # remove non-essential things to save on package size + rm -f ${MASON_PREFIX}/bin/ccmake + rm -f ${MASON_PREFIX}/bin/cmakexbuild + rm -f ${MASON_PREFIX}/bin/cpack + rm -f ${MASON_PREFIX}/bin/ctest + rm -rf ${MASON_PREFIX}/share/cmake-*/Help + ls -lh ${MASON_PREFIX}/bin/ +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/cmake/3.2.2/script.sh b/scripts/cmake/3.2.2/script.sh index ba3d5bf55..ea95ed0c4 100755 --- a/scripts/cmake/3.2.2/script.sh +++ b/scripts/cmake/3.2.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/cmake function mason_load_source { mason_download \ - http://www.cmake.org/files/v3.2/cmake-3.2.2.tar.gz \ + https://www.cmake.org/files/v3.2/cmake-3.2.2.tar.gz \ b7cb39c390dcd8abad4af33d5507b68965e488b4 mason_extract_tar_gz diff --git a/scripts/cmake/3.21.2/.travis.yml b/scripts/cmake/3.21.2/.travis.yml new file mode 100644 index 000000000..5fd35762f --- /dev/null +++ b/scripts/cmake/3.21.2/.travis.yml @@ -0,0 +1,15 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11.6 + - os: linux + 
addons: + apt: + sources: [ 'ubuntu-toolchain-r-test' ] + packages: [ 'libstdc++-6-dev' ] + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/cmake/3.21.2/script.sh b/scripts/cmake/3.21.2/script.sh new file mode 100755 index 000000000..382298552 --- /dev/null +++ b/scripts/cmake/3.21.2/script.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +MASON_NAME=cmake +MASON_VERSION=3.21.2 +MASON_LIB_FILE=bin/cmake + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/Kitware/CMake/releases/download/v${MASON_VERSION}/cmake-${MASON_VERSION}.tar.gz \ + e41de8fab437ea46c8c8668a2920a5cf53f2915d + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_prepare_compile { + ${MASON_DIR}/mason install ccache 3.7.2 + export PATH=$(${MASON_DIR}/mason prefix ccache 3.7.2)/bin:${PATH} +} +function mason_compile { + # Add optimization flags since CFLAGS overrides the default (-g -O2) + export CFLAGS="${CFLAGS} -O3 -DNDEBUG" + export CXXFLAGS="${CXXFLAGS} -O3 -DNDEBUG" + # TODO - use mason deps + ./configure --prefix=${MASON_PREFIX} \ + --no-system-libs \ + --parallel=${MASON_CONCURRENCY} \ + --enable-ccache + make -j${MASON_CONCURRENCY} VERBOSE=1 + make install + # remove non-essential things to save on package size + rm -f ${MASON_PREFIX}/bin/ccmake + rm -f ${MASON_PREFIX}/bin/cmakexbuild + rm -f ${MASON_PREFIX}/bin/cpack + rm -f ${MASON_PREFIX}/bin/ctest + rm -rf ${MASON_PREFIX}/share/cmake-*/Help + ls -lh ${MASON_PREFIX}/bin/ +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/cmake/3.5.2/script.sh b/scripts/cmake/3.5.2/script.sh index 7a591ba3c..f07d41d0c 100755 --- a/scripts/cmake/3.5.2/script.sh +++ b/scripts/cmake/3.5.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/cmake function mason_load_source { 
mason_download \ - http://www.cmake.org/files/v3.5/cmake-${MASON_VERSION}.tar.gz \ + https://www.cmake.org/files/v3.5/cmake-${MASON_VERSION}.tar.gz \ 70cbd618e8ac39414928d79c949968e7dd7a5605 mason_extract_tar_gz diff --git a/scripts/cmake/3.6.2/script.sh b/scripts/cmake/3.6.2/script.sh index 148c96993..47ffe481b 100755 --- a/scripts/cmake/3.6.2/script.sh +++ b/scripts/cmake/3.6.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/cmake function mason_load_source { mason_download \ - http://www.cmake.org/files/v3.6/cmake-${MASON_VERSION}.tar.gz \ + https://www.cmake.org/files/v3.6/cmake-${MASON_VERSION}.tar.gz \ f2c114944dafb319c27bdca214ca7e0739a71cb0 mason_extract_tar_gz diff --git a/scripts/cmake/3.7.1/script.sh b/scripts/cmake/3.7.1/script.sh index 73c0880aa..1af5ee1f3 100755 --- a/scripts/cmake/3.7.1/script.sh +++ b/scripts/cmake/3.7.1/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/cmake function mason_load_source { mason_download \ - http://www.cmake.org/files/v3.7/cmake-${MASON_VERSION}.tar.gz \ + https://www.cmake.org/files/v3.7/cmake-${MASON_VERSION}.tar.gz \ 591a89d83e3659884c52e6cf7009725a6b4e94e5 mason_extract_tar_gz diff --git a/scripts/cmake/3.7.2/script.sh b/scripts/cmake/3.7.2/script.sh index 0a73d40a4..de22a0e53 100755 --- a/scripts/cmake/3.7.2/script.sh +++ b/scripts/cmake/3.7.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/cmake function mason_load_source { mason_download \ - http://www.cmake.org/files/v3.7/cmake-${MASON_VERSION}.tar.gz \ + https://www.cmake.org/files/v3.7/cmake-${MASON_VERSION}.tar.gz \ 35e73aad419b0dca4d5f8e8ba483e29ff54b7f05 mason_extract_tar_gz diff --git a/scripts/cmake/3.8.2/script.sh b/scripts/cmake/3.8.2/script.sh index 7dc216176..25d323025 100755 --- a/scripts/cmake/3.8.2/script.sh +++ b/scripts/cmake/3.8.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/cmake function mason_load_source { mason_download \ - http://www.cmake.org/files/v3.8/cmake-${MASON_VERSION}.tar.gz \ + https://www.cmake.org/files/v3.8/cmake-${MASON_VERSION}.tar.gz 
\ 7c5b04d0fda1c77f495bbaaa720dac089243c7e7 mason_extract_tar_gz diff --git a/scripts/crosstool-ng/1.23.0/script.sh b/scripts/crosstool-ng/1.23.0/script.sh index 2e7162068..2ca1c7aa4 100755 --- a/scripts/crosstool-ng/1.23.0/script.sh +++ b/scripts/crosstool-ng/1.23.0/script.sh @@ -9,7 +9,7 @@ MASON_LIB_FILE=bin/ct-ng function mason_load_source { mason_download \ - http://crosstool-ng.org/download/crosstool-ng/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ + https://crosstool-ng.org/download/crosstool-ng/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ 1b69890d021b5b50a96b70be0fad3bd6e64a6e9e mason_extract_tar_bz2 diff --git a/scripts/eternal/1.0.0/.travis.yml b/scripts/eternal/1.0.0/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/eternal/1.0.0/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/eternal/1.0.0/script.sh b/scripts/eternal/1.0.0/script.sh new file mode 100644 index 000000000..5b8c92a92 --- /dev/null +++ b/scripts/eternal/1.0.0/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=eternal +MASON_VERSION=1.0.0 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/eternal/archive/v${MASON_VERSION}.tar.gz \ + b40b0fe8de247b4467d6c8c1fa6e4ac4a06c44b2 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/eternal-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/expat/2.3.0/.travis.yml b/scripts/expat/2.3.0/.travis.yml new file mode 100644 index 000000000..d33f4b67b --- /dev/null +++ b/scripts/expat/2.3.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode12.0 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-6-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/expat/2.3.0/script.sh b/scripts/expat/2.3.0/script.sh new file mode 100755 index 000000000..1d00d78ca --- /dev/null +++ b/scripts/expat/2.3.0/script.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +MASON_NAME=expat +MASON_VERSION=2.3.0 +MASON_VERSION2="R_${MASON_VERSION//./_}" +MASON_LIB_FILE=lib/libexpat.a +MASON_PKGCONFIG_FILE=lib/pkgconfig/expat.pc + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/libexpat/libexpat/releases/download/${MASON_VERSION2}/expat-${MASON_VERSION}.tar.gz \ + f6d07e2b059f7c17a0d16257d972268f66c4aec3 + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/expat-${MASON_VERSION} +} + +function mason_compile { + # Add optimization flags since CFLAGS overrides the default (-g -O2) + export CFLAGS="${CFLAGS} -O3 -DNDEBUG" + ./configure \ + --prefix=${MASON_PREFIX} \ + ${MASON_HOST_ARG} \ + --without-xmlwf \ + --enable-static \ + --disable-shared \ + --disable-dependency-tracking + + make install -j${MASON_CONCURRENCY} +} + +function mason_cflags { + echo -I${MASON_PREFIX}/include +} + +function mason_ldflags { + echo -L${MASON_PREFIX}/lib -lexpat +} + + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/freetype/2.5.4/script.sh b/scripts/freetype/2.5.4/script.sh index 5f68a51d3..86c2f884f 100755 --- a/scripts/freetype/2.5.4/script.sh +++ b/scripts/freetype/2.5.4/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/freetype2.pc function mason_load_source { mason_download \ - http://nongnu.askapache.com/freetype/freetype-${MASON_VERSION}.tar.bz2 \ + https://nongnu.askapache.com/freetype/freetype-old/freetype-${MASON_VERSION}.tar.bz2 \ 0646f7e62a6191affe92270e2544e6011f5227e8 mason_extract_tar_bz2 diff --git a/scripts/freetype/2.5.5/script.sh b/scripts/freetype/2.5.5/script.sh index 7c9d77eb5..339350747 100755 --- a/scripts/freetype/2.5.5/script.sh +++ b/scripts/freetype/2.5.5/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/freetype2.pc function mason_load_source { mason_download \ - http://nongnu.askapache.com/freetype/freetype-${MASON_VERSION}.tar.bz2 \ + https://nongnu.askapache.com/freetype/freetype-old/freetype-${MASON_VERSION}.tar.bz2 \ c857bfa638b9c71e48baacd1cb12be446b62c333 mason_extract_tar_bz2 diff --git a/scripts/freetype/2.6.5/script.sh b/scripts/freetype/2.6.5/script.sh 
index 719407c8c..c28316095 100755 --- a/scripts/freetype/2.6.5/script.sh +++ b/scripts/freetype/2.6.5/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/freetype2.pc function mason_load_source { mason_download \ - http://nongnu.askapache.com/freetype/freetype-${MASON_VERSION}.tar.bz2 \ + https://nongnu.askapache.com/freetype/freetype-old/freetype-${MASON_VERSION}.tar.bz2 \ 24dd30c95d3795cb3d82a760b9858992de262630 mason_extract_tar_bz2 diff --git a/scripts/freetype/2.6/script.sh b/scripts/freetype/2.6/script.sh index b9d1b3dcb..222bd9901 100755 --- a/scripts/freetype/2.6/script.sh +++ b/scripts/freetype/2.6/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/freetype2.pc function mason_load_source { mason_download \ - http://nongnu.askapache.com/freetype/freetype-${MASON_VERSION}.tar.bz2 \ + https://nongnu.askapache.com/freetype/freetype-old/freetype-${MASON_VERSION}.tar.bz2 \ 3cdf364b5db1c1adba670b188d76035ecba2d77c mason_extract_tar_bz2 diff --git a/scripts/freetype/2.7.1/script.sh b/scripts/freetype/2.7.1/script.sh index c35c7c88c..a0d5b3fee 100755 --- a/scripts/freetype/2.7.1/script.sh +++ b/scripts/freetype/2.7.1/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/freetype2.pc function mason_load_source { mason_download \ - http://nongnu.askapache.com/freetype/freetype-${MASON_VERSION}.tar.bz2 \ + https://nongnu.askapache.com/freetype/freetype-${MASON_VERSION}.tar.bz2 \ 51abc6f9afd5bbcbdcc0d9ea20b145f0ff1be632 mason_extract_tar_bz2 diff --git a/scripts/gdal/1.11.1-big-pants/script.sh b/scripts/gdal/1.11.1-big-pants/script.sh index 0c2643825..6192f3386 100755 --- a/scripts/gdal/1.11.1-big-pants/script.sh +++ b/scripts/gdal/1.11.1-big-pants/script.sh @@ -79,7 +79,7 @@ function mason_compile { fi # note: we put ${STDLIB_CXXFLAGS} into CXX instead of LDFLAGS due to libtool oddity: - # http://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass + # 
https://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass if [[ $(uname -s) == 'Darwin' ]]; then CXX="${CXX} -stdlib=libc++ -std=c++11" fi diff --git a/scripts/gdal/1.11.1/script.sh b/scripts/gdal/1.11.1/script.sh index e227185b8..70cc03c2b 100755 --- a/scripts/gdal/1.11.1/script.sh +++ b/scripts/gdal/1.11.1/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libgdal.a function mason_load_source { mason_download \ - http://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}.tar.gz \ 6a06e527e6a5abd565a67f84caadf9f891e5f49b mason_extract_tar_gz @@ -57,7 +57,7 @@ function mason_compile { # not produce a shared library no matter if --enable-shared is passed # note: we put ${STDLIB_CXXFLAGS} into CXX instead of LDFLAGS due to libtool oddity: - # http://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass + # https://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass if [[ $(uname -s) == 'Darwin' ]]; then CXX="${CXX} -stdlib=libc++ -std=c++11" fi diff --git a/scripts/gdal/1.11.2/script.sh b/scripts/gdal/1.11.2/script.sh index d1f04ba01..38e6e2186 100755 --- a/scripts/gdal/1.11.2/script.sh +++ b/scripts/gdal/1.11.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libgdal.a function mason_load_source { mason_download \ - http://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}.tar.gz \ 50660f82fb01ff1c97f6342a3fbbe5bdc6d01b09 mason_extract_tar_gz @@ -57,7 +57,7 @@ function mason_compile { # not produce a shared library no matter if --enable-shared is passed # note: we put ${STDLIB_CXXFLAGS} into CXX instead of LDFLAGS due to libtool oddity: - # 
http://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass + # https://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass if [[ $(uname -s) == 'Darwin' ]]; then CXX="${CXX} -stdlib=libc++ -std=c++11" fi diff --git a/scripts/gdal/2.0.2/script.sh b/scripts/gdal/2.0.2/script.sh index dbf9e8052..63d97e544 100755 --- a/scripts/gdal/2.0.2/script.sh +++ b/scripts/gdal/2.0.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libgdal.a function mason_load_source { mason_download \ - http://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}.tar.gz \ 6b82c9f5e356774a8451182d8720ed4a262a0d5e mason_extract_tar_gz @@ -81,7 +81,7 @@ function mason_compile { fi # note: we put ${STDLIB_CXXFLAGS} into CXX instead of LDFLAGS due to libtool oddity: - # http://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass + # https://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass if [[ $(uname -s) == 'Darwin' ]]; then CXX="${CXX} -stdlib=libc++ -std=c++11" fi diff --git a/scripts/gdal/2.1.1/script.sh b/scripts/gdal/2.1.1/script.sh index f6ddf6ddd..f1894883c 100755 --- a/scripts/gdal/2.1.1/script.sh +++ b/scripts/gdal/2.1.1/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libgdal.a function mason_load_source { mason_download \ - http://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}.tar.gz \ 66aa2e083027cff36c000060f4e61ce5e1405307 mason_extract_tar_gz @@ -81,7 +81,7 @@ function mason_compile { fi # note: we put ${STDLIB_CXXFLAGS} into CXX instead of LDFLAGS due to libtool oddity: - # 
http://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass + # https://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass if [[ $(uname -s) == 'Darwin' ]]; then CXX="${CXX} -stdlib=libc++ -std=c++11" fi diff --git a/scripts/gdal/2.1.3/script.sh b/scripts/gdal/2.1.3/script.sh index fd92e427e..135155221 100755 --- a/scripts/gdal/2.1.3/script.sh +++ b/scripts/gdal/2.1.3/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libgdal.a function mason_load_source { mason_download \ - http://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}.tar.gz \ 552231f8ffe060ba30e37f1a8e6c4665bcf3cd1d mason_extract_tar_gz @@ -90,7 +90,7 @@ function mason_compile { export CXX="${MASON_CCACHE} ${CXX}" # note: we put ${STDLIB_CXXFLAGS} into CXX instead of LDFLAGS due to libtool oddity: - # http://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass + # https://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass if [[ $(uname -s) == 'Darwin' ]]; then export CXX="${CXX} -stdlib=libc++ -std=c++11" fi diff --git a/scripts/gdal/2.2.1/script.sh b/scripts/gdal/2.2.1/script.sh index 7f971a22d..c275ebedd 100755 --- a/scripts/gdal/2.2.1/script.sh +++ b/scripts/gdal/2.2.1/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libgdal.a function mason_load_source { mason_download \ - http://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}.tar.gz \ ed13ebc5b23f3d4a2c88e9d28d2d0b1b97563e3e mason_extract_tar_gz @@ -90,7 +90,7 @@ function mason_compile { export CXX="${MASON_CCACHE} ${CXX}" # note: we put ${STDLIB_CXXFLAGS} into CXX instead of LDFLAGS due to libtool oddity: - # 
http://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass + # https://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass if [[ $(uname -s) == 'Darwin' ]]; then export CXX="${CXX} -stdlib=libc++ -std=c++11" fi diff --git a/scripts/gdal/2.2.2/script.sh b/scripts/gdal/2.2.2/script.sh index 4f8c2b139..d570d61d7 100755 --- a/scripts/gdal/2.2.2/script.sh +++ b/scripts/gdal/2.2.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libgdal.a function mason_load_source { mason_download \ - http://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}.tar.gz \ 1ed7b4303cd30c212bb75b763d7447acebad6e95 mason_extract_tar_gz @@ -100,7 +100,7 @@ function mason_compile { export CXX="${MASON_CCACHE} ${CXX}" # note: we put ${STDLIB_CXXFLAGS} into CXX instead of LDFLAGS due to libtool oddity: - # http://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass + # https://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass if [[ $(uname -s) == 'Darwin' ]]; then export CXX="${CXX} -stdlib=libc++ -std=c++11" fi diff --git a/scripts/gdal/2.2.3-1/script.sh b/scripts/gdal/2.2.3-1/script.sh index fef003a27..2b474aadd 100755 --- a/scripts/gdal/2.2.3-1/script.sh +++ b/scripts/gdal/2.2.3-1/script.sh @@ -9,7 +9,7 @@ MASON_LIB_FILE=lib/libgdal.a function mason_load_source { mason_download \ - http://download.osgeo.org/gdal/${MASON_VERSION2}/gdal-${MASON_VERSION2}.tar.gz \ + https://download.osgeo.org/gdal/${MASON_VERSION2}/gdal-${MASON_VERSION2}.tar.gz \ f4ac4fb76e20cc149d169163914d76d51173ce82 mason_extract_tar_gz @@ -122,7 +122,7 @@ function mason_compile { export CXX="${MASON_CCACHE} ${CXX}" # note: we put ${STDLIB_CXXFLAGS} into CXX instead of LDFLAGS due to libtool oddity: 
- # http://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass + # https://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass if [[ $(uname -s) == 'Darwin' ]]; then export CXX="${CXX} -stdlib=libc++ -std=c++11" fi diff --git a/scripts/gdal/2.2.3/script.sh b/scripts/gdal/2.2.3/script.sh index 9d71ce57b..5d5d88e21 100755 --- a/scripts/gdal/2.2.3/script.sh +++ b/scripts/gdal/2.2.3/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libgdal.a function mason_load_source { mason_download \ - http://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}RC1.tar.gz \ + https://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}RC1.tar.gz \ f4ac4fb76e20cc149d169163914d76d51173ce82 mason_extract_tar_gz @@ -106,7 +106,7 @@ function mason_compile { export CXX="${MASON_CCACHE} ${CXX}" # note: we put ${STDLIB_CXXFLAGS} into CXX instead of LDFLAGS due to libtool oddity: - # http://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass + # https://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass if [[ $(uname -s) == 'Darwin' ]]; then export CXX="${CXX} -stdlib=libc++ -std=c++11" fi diff --git a/scripts/gdal/2.4.1/.travis.yml b/scripts/gdal/2.4.1/.travis.yml new file mode 100644 index 000000000..3cb9dac37 --- /dev/null +++ b/scripts/gdal/2.4.1/.travis.yml @@ -0,0 +1,20 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/gdal/2.4.1/patch.diff b/scripts/gdal/2.4.1/patch.diff new file mode 100644 index 000000000..74945d50d --- /dev/null +++ 
b/scripts/gdal/2.4.1/patch.diff @@ -0,0 +1,141 @@ +diff --git a/apps/GNUmakefile b/apps/GNUmakefile +index a87cd0f..931a988 100644 +--- a/apps/GNUmakefile ++++ b/apps/GNUmakefile +@@ -79,103 +79,103 @@ gdalbuildvrt_lib.$(OBJ_EXT): gdalbuildvrt_lib.cpp + $(CXX) -c $(GDAL_INCLUDE) $(CPPFLAGS) $(CXXFLAGS) $< -o $@ + + gdalinfo$(EXE): gdalinfo_bin.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) $(CONFIG_LIB_UTILS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) $(CONFIG_LIB_UTILS) -o $@ $(LNK_FLAGS) + + gdalserver$(EXE): gdalserver.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdal_translate$(EXE): gdal_translate_bin.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdaladdo$(EXE): gdaladdo.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdalwarp$(EXE): gdalwarp_bin.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdal_contour$(EXE): gdal_contour.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + nearblack$(EXE): nearblack_bin.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdalmanage$(EXE): gdalmanage.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdal_rasterize$(EXE): gdal_rasterize_bin.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdaltindex$(EXE): gdaltindex.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< 
$(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdalbuildvrt$(EXE): gdalbuildvrt_bin.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + multireadtest$(EXE): multireadtest.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + dumpoverviews$(EXE): dumpoverviews.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdalenhance$(EXE): gdalenhance.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdaldem$(EXE): gdaldem_bin.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdal_grid$(EXE): gdal_grid_bin.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdalwarpsimple$(EXE): gdalwarpsimple.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdaltransform$(EXE): gdaltransform.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdallocationinfo$(EXE): gdallocationinfo.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdalsrsinfo$(EXE): gdalsrsinfo.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdalflattenmask$(EXE): gdalflattenmask.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) 
-o $@ $(LNK_FLAGS) + + gdaltorture$(EXE): gdaltorture.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdal2ogr$(EXE): gdal2ogr.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + ogrinfo$(EXE): ogrinfo.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + ogrlineref$(EXE): ogrlineref.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + ogr2ogr$(EXE): ogr2ogr_bin.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + testepsg$(EXE): testepsg.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + ogrtindex$(EXE): ogrtindex.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + test_ogrsf$(EXE): test_ogrsf.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gdalasyncread$(EXE): gdalasyncread.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + testreprojmulti$(EXE): testreprojmulti.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gnmmanage$(EXE): gnmmanage.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ ++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + gnmanalyse$(EXE): gnmanalyse.$(OBJ_EXT) $(DEP_LIBS) +- $(LD) $(LNK_FLAGS) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ 
++ $(LD) $< $(XTRAOBJ) $(CONFIG_LIBS) -o $@ $(LNK_FLAGS) + + clean: + $(RM) *.o $(BIN_LIST) core gdal-config gdal-config-inst diff --git a/scripts/gdal/2.4.1/readme.md b/scripts/gdal/2.4.1/readme.md new file mode 100644 index 000000000..e6e98dd37 --- /dev/null +++ b/scripts/gdal/2.4.1/readme.md @@ -0,0 +1,76 @@ +## Packaging GDAL + +### Background + +GDAL is one of the harder software libraries to package because it has so many required and potential dependencies. + +Also, mason prefers packaging libraries as static archives, which complicates dependency handling: when linking to static libraries you need all of the libraries (passed to the linker) that the static archive depends on. For GDAL that is the C standard library, the C++ standard library, and potentially a lot of other libraries, both with C and C++ dependencies. This is the main reason that the script.sh is so gnarly. The upside is that the libraries are then standalone at runtime. So, hard to build, easy to run. It's a tradeoff. + +### Steps to package + +This document intends to guide you through the basic steps to package a new version of GDAL in mason. + +#### Step 1: Copy a previous GDAL package. + +Find the last successfully packaged gdal version in mason. Perhaps use the highest incremented version: + +``` +ls scripts/gdal/ +1.11.1 1.11.1-big-pants 1.11.2 2.0.2 2.1.1 2.1.3 2.2.1 2.2.2 2.2.3 2.2.3-1 2.4.1 dev +``` + +It is `2.2.3-1` at the time of this writing. + +Then find the most recent release at http://download.osgeo.org/gdal/ + +Create a new package: + +``` +cd mason +cp -r scripts/gdal/2.2.3-1 scripts/gdal/2.4.1 +``` + +Open up `scripts/gdal/2.4.1/script.sh` and edit the `MASON_VERSION` variable to be `2.4.1` + +#### Step 2: Now try building + +This will fail with an error, but just do it anyway: + +``` +./mason build gdal 2.4.1 +``` + +The error is because the hash changed for the upstream download, because you changed the `MASON_VERSION`.
+ +You will see an error like: + +> Hash 38758d9fa5083e8d8e4333c38e132e154da9f25f of file /Users/danespringmeyer/projects/mason/mason_packages/.cache/gdal-2.4.1 doesn't match f4ac4fb76e20cc149d169163914d76d51173ce82 + +To fix this, edit `scripts/gdal/2.4.1/script.sh` and add the first hash reported on `line 12`. + +Now try building again: + +``` +./mason build gdal 2.4.1 +``` + +If it succeeded locally then you are good to continue to the next step. + + +#### Step 3: push to github and build on travis + +First create a new mason branch and push all the new scripts: + +``` +git checkout -b gdal-2.4.1 +git add scripts/gdal +git commit scripts/gdal -m "adding GDAL 2.4.1" +``` + +Then try triggering a build on travis. To do this, run: + +``` +./mason trigger gdal 2.4.1 +``` + +And watch for the build job to appear at https://travis-ci.org/mapbox/mason/builds. It will have a "lego" icon and a title like "Building gdal 2.4.1", which denotes a triggered build. diff --git a/scripts/gdal/2.4.1/script.sh b/scripts/gdal/2.4.1/script.sh new file mode 100755 index 000000000..0c76d88b4 --- /dev/null +++ b/scripts/gdal/2.4.1/script.sh @@ -0,0 +1,220 @@ +#!/usr/bin/env bash + +MASON_NAME=gdal +MASON_VERSION=2.4.1 +MASON_LIB_FILE=lib/libgdal.a + +.
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://download.osgeo.org/gdal/${MASON_VERSION}/gdal-${MASON_VERSION}.tar.gz \ + 38758d9fa5083e8d8e4333c38e132e154da9f25f + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_prepare_compile { + # This line is critical: it ensures that we install deps in + # the parent folder rather than within the ./build directory + # such that our modifications to the .la files work + cd $(dirname ${MASON_ROOT}) + # set up to fix libtool .la files + # https://github.com/mapbox/mason/issues/61 + if [[ $(uname -s) == 'Darwin' ]]; then + FIND="\/Users\/travis\/build\/mapbox\/mason" + else + FIND="\/home\/travis\/build\/mapbox\/mason" + fi + REPLACE="$(pwd)" + REPLACE=${REPLACE////\\/} + LIBTIFF_VERSION="4.0.8" + PROJ_VERSION="4.9.3" + JPEG_VERSION="1.5.2" + PNG_VERSION="1.6.32" + EXPAT_VERSION="2.2.4" + POSTGRES_VERSION="9.6.5" + SQLITE_VERSION="3.21.0" + CCACHE_VERSION="3.3.1" + GEOS_VERSION="3.6.2" + ${MASON_DIR}/mason install geos ${GEOS_VERSION} + MASON_GEOS=$(${MASON_DIR}/mason prefix geos ${GEOS_VERSION}) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_GEOS}/lib/libgeos.la + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_GEOS}/lib/libgeos_c.la + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_GEOS}/bin/geos-config + ${MASON_DIR}/mason install libtiff ${LIBTIFF_VERSION} + MASON_TIFF=$(${MASON_DIR}/mason prefix libtiff ${LIBTIFF_VERSION}) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_TIFF}/lib/libtiff.la + ${MASON_DIR}/mason install proj ${PROJ_VERSION} + MASON_PROJ=$(${MASON_DIR}/mason prefix proj ${PROJ_VERSION}) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_PROJ}/lib/libproj.la + ${MASON_DIR}/mason install jpeg_turbo ${JPEG_VERSION} + MASON_JPEG=$(${MASON_DIR}/mason prefix jpeg_turbo ${JPEG_VERSION}) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_JPEG}/lib/libjpeg.la + ${MASON_DIR}/mason install libpng 
${PNG_VERSION} + MASON_PNG=$(${MASON_DIR}/mason prefix libpng ${PNG_VERSION}) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_PNG}/lib/libpng.la + ${MASON_DIR}/mason install expat ${EXPAT_VERSION} + MASON_EXPAT=$(${MASON_DIR}/mason prefix expat ${EXPAT_VERSION}) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_EXPAT}/lib/libexpat.la + ${MASON_DIR}/mason install libpq ${POSTGRES_VERSION} + MASON_LIBPQ=$(${MASON_DIR}/mason prefix libpq ${POSTGRES_VERSION}) + ${MASON_DIR}/mason install sqlite ${SQLITE_VERSION} + MASON_SQLITE=$(${MASON_DIR}/mason prefix sqlite ${SQLITE_VERSION}) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_SQLITE}/lib/libsqlite3.la + # depends on sudo apt-get install zlib1g-dev + ${MASON_DIR}/mason install zlib system + MASON_ZLIB=$(${MASON_DIR}/mason prefix zlib system) + # depends on sudo apt-get install libc6-dev + #${MASON_DIR}/mason install iconv system + #MASON_ICONV=$(${MASON_DIR}/mason prefix iconv system) + export LIBRARY_PATH=${MASON_LIBPQ}/lib:${LIBRARY_PATH:-} + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION})/bin/ccache +} + +function mason_compile { + if [[ ${MASON_PLATFORM} == 'linux' ]]; then + mason_step "Loading patch" + patch -N -p1 < ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff + fi + + # very custom handling for the C++ lib of geos, which also needs + # to be linked when linking statically (since geos_c C API depends on it) + if [[ $(uname -s) == 'Linux' ]]; then + perl -i -p -e "s/ \-lgeos_c/ \-lgeos_c \-lgeos \-lstdc++ \-lm/g;" configure + elif [[ $(uname -s) == 'Darwin' ]]; then + perl -i -p -e "s/ \-lgeos_c/ \-lgeos_c \-lgeos \-lc++ \-lm/g;" configure + fi + + # note CFLAGS overrides defaults so we need to add optimization flags back + export CFLAGS="${CFLAGS} -O3 -DNDEBUG" + export CXXFLAGS="${CXXFLAGS} -O3 -DNDEBUG" + + CUSTOM_LIBS="-L${MASON_GEOS}/lib -lgeos_c -lgeos -L${MASON_SQLITE}/lib -lsqlite3 -L${MASON_TIFF}/lib -ltiff 
-L${MASON_JPEG}/lib -ljpeg -L${MASON_PROJ}/lib -lproj -L${MASON_PNG}/lib -lpng -L${MASON_EXPAT}/lib -lexpat" + CUSTOM_CFLAGS="${CFLAGS} -I${MASON_GEOS}/include -I${MASON_LIBPQ}/include -I${MASON_TIFF}/include -I${MASON_JPEG}/include -I${MASON_PROJ}/include -I${MASON_PNG}/include -I${MASON_EXPAT}/include" + + # very custom handling for libpq/postgres support + # forcing our portable static library to be used + MASON_LIBPQ_PATH=${MASON_LIBPQ}/lib/libpq.a + + if [[ $(uname -s) == 'Linux' ]]; then + # on Linux passing -Wl will lead to libtool re-positioning libpq.a in the wrong place (no longer after libgdal.a) + # which leads to unresolved symbols + CUSTOM_LDFLAGS="${LDFLAGS} ${MASON_LIBPQ_PATH}" + # linking statically to libsqlite requires -ldl -pthreads + CUSTOM_LDFLAGS="${CUSTOM_LDFLAGS} -ldl -pthread" + else + # on OSX not passing -Wl will break libtool archive creation leading to confusing arch errors + CUSTOM_LDFLAGS="${LDFLAGS} -Wl,${MASON_LIBPQ_PATH}" + fi + # we have to remove -lpq otherwise it will trigger linking to system /usr/lib/libpq + perl -i -p -e "s/\-lpq //g;" configure + # on linux -Wl,/path/to/libpq.a still does not work for the configure test + # so we have to force it into LIBS. 
But we don't do this on OS X since it breaks libtool archive logic + if [[ $(uname -s) == 'Linux' ]]; then + CUSTOM_LIBS="${MASON_LIBPQ}/lib/libpq.a -pthread ${CUSTOM_LIBS}" + fi + + export CXX="${MASON_CCACHE} ${CXX}" + + # note: we put ${STDLIB_CXXFLAGS} into CXX instead of LDFLAGS due to libtool oddity: + # https://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass + if [[ $(uname -s) == 'Darwin' ]]; then + export CXX="${CXX} -stdlib=libc++ -std=c++11" + fi + + # note: it might be tempting to build with --without-libtool + # but I find that will only lead to a shared libgdal.so and will + # not produce a static library even if --enable-static is passed + LIBS="${CUSTOM_LIBS}" LDFLAGS="${CUSTOM_LDFLAGS}" CFLAGS="${CUSTOM_CFLAGS}" ./configure \ + --enable-static --disable-shared \ + ${MASON_HOST_ARG} \ + --prefix=${MASON_PREFIX} \ + --with-libz=${MASON_ZLIB} \ + --disable-rpath \ + --with-libjson-c=internal \ + --with-geotiff=internal \ + --with-expat=${MASON_EXPAT} \ + --with-threads=yes \ + --with-fgdb=no \ + --with-rename-internal-libtiff-symbols=no \ + --with-rename-internal-libgeotiff-symbols=no \ + --with-hide-internal-symbols=yes \ + --with-libtiff=${MASON_TIFF} \ + --with-jpeg=${MASON_JPEG} \ + --with-png=${MASON_PNG} \ + --with-pg=${MASON_LIBPQ}/bin/pg_config \ + --with-static-proj4=${MASON_PROJ} \ + --with-sqlite3=${MASON_SQLITE} \ + --with-geos=${MASON_GEOS}/bin/geos-config \ + --with-spatialite=no \ + --with-curl=no \ + --with-xml2=no \ + --with-pcraster=no \ + --with-cfitsio=no \ + --with-odbc=no \ + --with-libkml=no \ + --with-pcidsk=no \ + --with-jasper=no \ + --with-gif=no \ + --with-grib=no \ + --with-freexl=no \ + --with-avx=no \ + --with-sse=no \ + --with-perl=no \ + --with-python=no \ + --with-java=no \ + --with-podofo=no \ + --with-pam \ + --with-webp=no \ + --with-pcre=no \ + --with-liblzma=no \ + --with-netcdf=no \ + --with-poppler=no \ + --with-sfcgal=no + make 
-j${MASON_CONCURRENCY} + make install + + relativize_gdal_config ${MASON_PREFIX}/bin/gdal-config ${MASON_PREFIX} ${MASON_ROOT}/${MASON_PLATFORM_ID} + +} + +function relativize_gdal_config() { + path_to_gdal_config=${1} + prefix_path=${2} + build_path=${3} + RESOLVE_SYMLINK="readlink" + if [[ $(uname -s) == 'Linux' ]];then + RESOLVE_SYMLINK="readlink -f" + fi + mv ${path_to_gdal_config} /tmp/gdal-config-backup + # append code at start + echo 'if test -L $0; then BASE=$( dirname $( '${RESOLVE_SYMLINK}' "$0" ) ); else BASE=$( dirname "$0" ); fi' > ${path_to_gdal_config} + cat /tmp/gdal-config-backup >> ${path_to_gdal_config} + chmod +x ${path_to_gdal_config} + + # now modify in place + python -c "data=open('${path_to_gdal_config}','r').read();open('${path_to_gdal_config}','w').write(data.replace('${prefix_path}','\$( cd \"\$( dirname \${BASE} )\" && pwd )'))" + # fix the path to dep libs (CONFIG_DEP_LIBS) + python -c "data=open('${path_to_gdal_config}','r').read();open('${path_to_gdal_config}','w').write(data.replace('${build_path}','\$( cd \"\$( dirname \$( dirname \$( dirname \${BASE} ) ))\" && pwd )'))" + # hack to re-add -lpq since otherwise it will not end up in --dep-libs + python -c "data=open('${path_to_gdal_config}','r').read();open('${path_to_gdal_config}','w').write(data.replace('\$CONFIG_DEP_LIBS','\$CONFIG_DEP_LIBS -lpq'))" +} + + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + echo $(${MASON_PREFIX}/bin/gdal-config --dep-libs --libs) +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/gdal/dev/script.sh b/scripts/gdal/dev/script.sh index 7e38dcbc5..49b72bdb7 100755 --- a/scripts/gdal/dev/script.sh +++ b/scripts/gdal/dev/script.sh @@ -82,7 +82,7 @@ function mason_compile { fi # note: we put ${STDLIB_CXXFLAGS} into CXX instead of LDFLAGS due to libtool oddity: - # 
http://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass + # https://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass if [[ $(uname -s) == 'Darwin' ]]; then CXX="${CXX} -stdlib=libc++ -std=c++11" fi diff --git a/scripts/geojson/0.4.3/.travis.yml b/scripts/geojson/0.4.3/.travis.yml new file mode 100644 index 000000000..5fca39ee3 --- /dev/null +++ b/scripts/geojson/0.4.3/.travis.yml @@ -0,0 +1,8 @@ +language: cpp +sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geojson/0.4.3/script.sh b/scripts/geojson/0.4.3/script.sh new file mode 100755 index 000000000..30c873848 --- /dev/null +++ b/scripts/geojson/0.4.3/script.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +MASON_NAME=geojson +MASON_VERSION=0.4.3 +MASON_HEADER_ONLY=true + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geojson-cpp/archive/v${MASON_VERSION}.tar.gz \ + 9f5f23269d13f3dd479540218ade9dd6095c8892 + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geojson-cpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/geojson/6b2850a/.travis.yml b/scripts/geojson/6b2850a/.travis.yml new file mode 100644 index 000000000..5fca39ee3 --- /dev/null +++ b/scripts/geojson/6b2850a/.travis.yml @@ -0,0 +1,8 @@ +language: cpp +sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geojson/6b2850a/script.sh b/scripts/geojson/6b2850a/script.sh new file mode 100755 index 000000000..ee18b97bc 
--- /dev/null +++ b/scripts/geojson/6b2850a/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=geojson +MASON_VERSION=6b2850a +MASON_VERSION_FULL=6b2850a2778f1a33d0489a373dc06ff494aca3c9 +MASON_HEADER_ONLY=true + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geojson-cpp/archive/${MASON_VERSION}.tar.gz \ + 4090b3fe9f1c9368f095bf16e3ce12175a7b3b09 + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geojson-cpp-${MASON_VERSION_FULL} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/geojsonvt/6.4.0/.travis.yml b/scripts/geojsonvt/6.4.0/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/geojsonvt/6.4.0/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geojsonvt/6.4.0/script.sh b/scripts/geojsonvt/6.4.0/script.sh new file mode 100644 index 000000000..bf0cf2e49 --- /dev/null +++ b/scripts/geojsonvt/6.4.0/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=geojsonvt +MASON_VERSION=6.4.0 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geojson-vt-cpp/archive/v${MASON_VERSION}.tar.gz \ + a5ed84cb624ffc8430372e7920a673dd31536d69 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geojson-vt-cpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/geojsonvt/6.5.0/.travis.yml b/scripts/geojsonvt/6.5.0/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/geojsonvt/6.5.0/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geojsonvt/6.5.0/script.sh b/scripts/geojsonvt/6.5.0/script.sh new file mode 100644 index 000000000..9fef14686 --- /dev/null +++ b/scripts/geojsonvt/6.5.0/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=geojsonvt +MASON_VERSION=6.5.0 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geojson-vt-cpp/archive/v${MASON_VERSION}.tar.gz \ + 7c06e4c4022641947346638673e095851643c535 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geojson-vt-cpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/geojsonvt/6.5.1/.travis.yml b/scripts/geojsonvt/6.5.1/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/geojsonvt/6.5.1/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geojsonvt/6.5.1/script.sh b/scripts/geojsonvt/6.5.1/script.sh new file mode 100644 index 000000000..e8cf2dfc4 --- /dev/null +++ b/scripts/geojsonvt/6.5.1/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=geojsonvt +MASON_VERSION=6.5.1 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geojson-vt-cpp/archive/v${MASON_VERSION}.tar.gz \ + dbd5732f14acd5ec13ff89ee588f909211f67585 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geojson-vt-cpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/geojsonvt/6.6.0/.travis.yml b/scripts/geojsonvt/6.6.0/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/geojsonvt/6.6.0/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geojsonvt/6.6.0/script.sh b/scripts/geojsonvt/6.6.0/script.sh new file mode 100644 index 000000000..643a5f473 --- /dev/null +++ b/scripts/geojsonvt/6.6.0/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=geojsonvt +MASON_VERSION=6.6.0 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geojson-vt-cpp/archive/v${MASON_VERSION}.tar.gz \ + e0745d9c7878d25018810c434abe54775e16da11 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geojson-vt-cpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/geojsonvt/6.6.1/.travis.yml b/scripts/geojsonvt/6.6.1/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/geojsonvt/6.6.1/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geojsonvt/6.6.1/script.sh b/scripts/geojsonvt/6.6.1/script.sh new file mode 100644 index 000000000..b480c9017 --- /dev/null +++ b/scripts/geojsonvt/6.6.1/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=geojsonvt +MASON_VERSION=6.6.1 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geojson-vt-cpp/archive/v${MASON_VERSION}.tar.gz \ + 8d3b31348d0b1858033d6fbff625241f58a6046f + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geojson-vt-cpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/geojsonvt/6.6.2/.travis.yml b/scripts/geojsonvt/6.6.2/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/geojsonvt/6.6.2/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geojsonvt/6.6.2/script.sh b/scripts/geojsonvt/6.6.2/script.sh new file mode 100644 index 000000000..8e69bfcc3 --- /dev/null +++ b/scripts/geojsonvt/6.6.2/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=geojsonvt +MASON_VERSION=6.6.2 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geojson-vt-cpp/archive/v${MASON_VERSION}.tar.gz \ + a16b1164e8521fedf92d0a04427971057988149f + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geojson-vt-cpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/geojsonvt/6.6.3/.travis.yml b/scripts/geojsonvt/6.6.3/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/geojsonvt/6.6.3/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geojsonvt/6.6.3/script.sh b/scripts/geojsonvt/6.6.3/script.sh new file mode 100644 index 000000000..4ef9708cf --- /dev/null +++ b/scripts/geojsonvt/6.6.3/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=geojsonvt +MASON_VERSION=6.6.3 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geojson-vt-cpp/archive/v${MASON_VERSION}.tar.gz \ + 174f931c710ac17ec03e188ef4737455c849b2a8 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geojson-vt-cpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/geometry/0.9.3/.travis.yml b/scripts/geometry/0.9.3/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/geometry/0.9.3/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geometry/0.9.3/script.sh b/scripts/geometry/0.9.3/script.sh new file mode 100644 index 000000000..28dbf3bf7 --- /dev/null +++ b/scripts/geometry/0.9.3/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=geometry +MASON_VERSION=0.9.3 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geometry.hpp/archive/v${MASON_VERSION}.tar.gz \ + 9ce30c5842f190e7ca8cbf91a70ce6076c9a320e + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geometry.hpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/geometry/1.0.0/.travis.yml b/scripts/geometry/1.0.0/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/geometry/1.0.0/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geometry/1.0.0/script.sh b/scripts/geometry/1.0.0/script.sh new file mode 100644 index 000000000..70b9817fa --- /dev/null +++ b/scripts/geometry/1.0.0/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=geometry +MASON_VERSION=1.0.0 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geometry.hpp/archive/v${MASON_VERSION}.tar.gz \ + 021de4e4f0d2a0e4b3fd6fff254ea043921a5ded + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geometry.hpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/geometry/1.1.0/.travis.yml b/scripts/geometry/1.1.0/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/geometry/1.1.0/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geometry/1.1.0/script.sh b/scripts/geometry/1.1.0/script.sh new file mode 100644 index 000000000..e069f0cc0 --- /dev/null +++ b/scripts/geometry/1.1.0/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=geometry +MASON_VERSION=1.1.0 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geometry.hpp/archive/v${MASON_VERSION}.tar.gz \ + 06729dbb4afb378ae6de3847290c7c1b1ee61680 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geometry.hpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/geometry/2.0.0/.travis.yml b/scripts/geometry/2.0.0/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/geometry/2.0.0/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geometry/2.0.0/script.sh b/scripts/geometry/2.0.0/script.sh new file mode 100644 index 000000000..87bc71a6d --- /dev/null +++ b/scripts/geometry/2.0.0/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=geometry +MASON_VERSION=2.0.0 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geometry.hpp/archive/v${MASON_VERSION}.tar.gz \ + 95d139d736e160d9b7016723d8c7ad9ae4a00d51 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geometry.hpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/geometry/2.0.1/.travis.yml b/scripts/geometry/2.0.1/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/geometry/2.0.1/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geometry/2.0.1/script.sh b/scripts/geometry/2.0.1/script.sh new file mode 100644 index 000000000..74c683710 --- /dev/null +++ b/scripts/geometry/2.0.1/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=geometry +MASON_VERSION=2.0.1 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geometry.hpp/archive/v${MASON_VERSION}.tar.gz \ + 9a549daeda2bb17fc62ce58e7d4bb871bec609fe + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geometry.hpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/geometry/2.0.3/.travis.yml b/scripts/geometry/2.0.3/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/geometry/2.0.3/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geometry/2.0.3/script.sh b/scripts/geometry/2.0.3/script.sh new file mode 100644 index 000000000..00b48e3b3 --- /dev/null +++ b/scripts/geometry/2.0.3/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=geometry +MASON_VERSION=2.0.3 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geometry.hpp/archive/v${MASON_VERSION}.tar.gz \ + 007ac3a6d2b6f55405845470de1b1c7b70b19c68 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geometry.hpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/geometry/4f1b6e6/.travis.yml b/scripts/geometry/4f1b6e6/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/geometry/4f1b6e6/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geometry/4f1b6e6/script.sh b/scripts/geometry/4f1b6e6/script.sh new file mode 100644 index 000000000..42fc02da1 --- /dev/null +++ b/scripts/geometry/4f1b6e6/script.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +MASON_NAME=geometry +MASON_VERSION=4f1b6e6 +MASON_VERSION_FULL=4f1b6e688e6766df8a9ae698a814718d4ebcfdb5 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geometry.hpp/archive/${MASON_VERSION}.tar.gz \ + 690052c9a9d15d5a74c9acf63215bdb07734729c + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geometry.hpp-${MASON_VERSION_FULL} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/geometry/cc46a09/.travis.yml b/scripts/geometry/cc46a09/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/geometry/cc46a09/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/geometry/cc46a09/script.sh b/scripts/geometry/cc46a09/script.sh new file mode 100644 index 000000000..514236c10 --- /dev/null +++ b/scripts/geometry/cc46a09/script.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +MASON_NAME=geometry +MASON_VERSION=cc46a09 +MASON_VERSION_FULL=cc46a0960d42d971a342d1cf032c55dbe72a5ac2 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/geometry.hpp/archive/${MASON_VERSION}.tar.gz \ + 21b5c82bd7d0a877a093747bc089c8fbb41477c8 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/geometry.hpp-${MASON_VERSION_FULL} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/geos/3.4.2/script.sh b/scripts/geos/3.4.2/script.sh index add5772e2..9e3f7e976 100755 --- a/scripts/geos/3.4.2/script.sh +++ b/scripts/geos/3.4.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libgeos.a function mason_load_source { mason_download \ - http://download.osgeo.org/geos/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ + https://download.osgeo.org/geos/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ b248842dee2afa6e944693c21571a2999dfafc5a mason_extract_tar_bz2 @@ -24,7 +24,7 @@ function mason_compile { patch -N -p1 < ./patch.diff # note: we put ${STDLIB_CXXFLAGS} into CXX instead of LDFLAGS due to libtool oddity: - # http://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass + # https://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass if [[ $(uname -s) == 'Darwin' ]]; then CXX="${CXX} -stdlib=libc++ -std=c++11" fi diff --git a/scripts/geos/3.5.0/script.sh b/scripts/geos/3.5.0/script.sh index 213a5d3e6..e8a515b50 100755 --- a/scripts/geos/3.5.0/script.sh +++ b/scripts/geos/3.5.0/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libgeos.a function mason_load_source { mason_download \ - http://download.osgeo.org/geos/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ + https://download.osgeo.org/geos/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ a641469449fc32b829fb885cb0ea5fdd3333ce62 mason_extract_tar_bz2 @@ -23,7 
+23,7 @@ function mason_compile { fi # note: we put ${STDLIB_CXXFLAGS} into CXX instead of LDFLAGS due to libtool oddity: - # http://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass + # https://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass if [[ $(uname -s) == 'Darwin' ]]; then CXX="${CXX} -stdlib=libc++ -std=c++11" fi diff --git a/scripts/geos/3.6.1/script.sh b/scripts/geos/3.6.1/script.sh index f47bad7b8..564efe7cb 100755 --- a/scripts/geos/3.6.1/script.sh +++ b/scripts/geos/3.6.1/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libgeos.a function mason_load_source { mason_download \ - http://download.osgeo.org/geos/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ + https://download.osgeo.org/geos/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ c23fa59e6ab8a4e8df634773fb1ac9794fc5d88a mason_extract_tar_bz2 @@ -23,7 +23,7 @@ function mason_compile { #fi # note: we put ${STDLIB_CXXFLAGS} into CXX instead of LDFLAGS due to libtool oddity: - # http://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass + # https://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass if [[ $(uname -s) == 'Darwin' ]]; then CXX="${CXX} -stdlib=libc++ -std=c++11" fi diff --git a/scripts/geos/3.6.2/script.sh b/scripts/geos/3.6.2/script.sh index 44098b23d..55433a74a 100755 --- a/scripts/geos/3.6.2/script.sh +++ b/scripts/geos/3.6.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libgeos.a function mason_load_source { mason_download \ - http://download.osgeo.org/geos/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ + https://download.osgeo.org/geos/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ 9d602e108e77005b0a2eed0bb1fecae8669a706d mason_extract_tar_bz2 @@ -23,7 +23,7 @@ function mason_compile { #fi # note: we put ${STDLIB_CXXFLAGS} into CXX instead of LDFLAGS due to libtool 
oddity: - # http://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass + # https://stackoverflow.com/questions/16248360/autotools-libtool-link-library-with-libstdc-despite-stdlib-libc-option-pass if [[ $(uname -s) == 'Darwin' ]]; then CXX="${CXX} -stdlib=libc++ -std=c++11" fi diff --git a/scripts/geowave-jace/0.8.7/script.sh b/scripts/geowave-jace/0.8.7/script.sh index e8e524924..54fbb9218 100755 --- a/scripts/geowave-jace/0.8.7/script.sh +++ b/scripts/geowave-jace/0.8.7/script.sh @@ -7,7 +7,7 @@ MASON_LIB_FILE=lib/libjace.a . ${MASON_DIR}/mason.sh function mason_load_source { - mason_download http://s3.amazonaws.com/geowave-rpms/release/TARBALL/geowave-0.8.7-c8ef40c-jace-source.tar.gz \ + mason_download https://s3.amazonaws.com/geowave-rpms/release/TARBALL/geowave-0.8.7-c8ef40c-jace-source.tar.gz \ 80f7002a063c6b178366e7376597acc53859558b mason_extract_tar_gz diff --git a/scripts/glfw/2018-06-27-0be4f3f/.travis.yml b/scripts/glfw/2018-06-27-0be4f3f/.travis.yml new file mode 100644 index 000000000..bf4610acb --- /dev/null +++ b/scripts/glfw/2018-06-27-0be4f3f/.travis.yml @@ -0,0 +1,22 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + - george-edison55-precise-backports + packages: [ 'libstdc++-5-dev', 'cmake', 'cmake-data', + 'libxi-dev', 'libglu1-mesa-dev', 'x11proto-randr-dev', 'x11proto-xext-dev', 'libxrandr-dev', + 'x11proto-xf86vidmode-dev', 'libxxf86vm-dev', 'libxcursor-dev', 'libxinerama-dev' ] + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/glfw/2018-06-27-0be4f3f/script.sh b/scripts/glfw/2018-06-27-0be4f3f/script.sh new file mode 100755 index 000000000..8fc105186 --- /dev/null +++ b/scripts/glfw/2018-06-27-0be4f3f/script.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +MASON_NAME=glfw 
+MASON_VERSION=2018-06-27-0be4f3f +MASON_LIB_FILE=lib/libglfw3.a +MASON_PKGCONFIG_FILE=lib/pkgconfig/glfw3.pc + +GLFW_HASH=0be4f3f75aebd9d24583ee86590a38e741db0904 + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/glfw/archive/${GLFW_HASH}.tar.gz \ + 6d5721a90513b270530e5b49b41641abaf346a58 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${GLFW_HASH} +} + +function mason_compile { + rm -rf build + mkdir build + cd build + cmake .. \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} \ + -DCMAKE_INCLUDE_PATH=${MASON_PREFIX}/include \ + -DCMAKE_LIBRARY_PATH=${MASON_PREFIX}/lib \ + -DBUILD_SHARED_LIBS=OFF \ + -DGLFW_BUILD_DOCS=OFF \ + -DGLFW_BUILD_TESTS=OFF \ + -DGLFW_BUILD_EXAMPLES=OFF \ + -DCMAKE_BUILD_TYPE=Release + + make install -j${MASON_CONCURRENCY} +} + +function mason_ldflags { + LIBS=$(`mason_pkgconfig` --static --libs-only-l --libs-only-other) + echo ${LIBS//-lglfw3/} +} + +mason_run "$@" diff --git a/scripts/gtest/1.8.0_1-cxx11abi/.travis.yml b/scripts/gtest/1.8.0_1-cxx11abi/.travis.yml new file mode 100644 index 000000000..d607a2d3a --- /dev/null +++ b/scripts/gtest/1.8.0_1-cxx11abi/.travis.yml @@ -0,0 +1,20 @@ +language: cpp + +sudo: false + +compiler: clang + +matrix: + exclude: + - os: linux + include: + - os: linux + dist: trusty + env: MASON_PLATFORM=linux + +script: +- sudo perl -i -p -e "s/# define _GLIBCXX_USE_DUAL_ABI 0/# define _GLIBCXX_USE_DUAL_ABI 1/g;" /usr/include/x86_64-linux-gnu/c++/5/bits/c++config.h +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/gtest/1.8.0_1-cxx11abi/script.sh b/scripts/gtest/1.8.0_1-cxx11abi/script.sh new file mode 100755 index 000000000..74396e07c --- /dev/null +++ b/scripts/gtest/1.8.0_1-cxx11abi/script.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +REVISION=_1 +LIB_VERSION=1.8.0 + +MASON_NAME=gtest +MASON_VERSION=${LIB_VERSION}${REVISION} 
+MASON_LIB_FILE=lib/libgtest.a + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/google/googletest/archive/release-${LIB_VERSION}.tar.gz \ + a40df33faad4a1fb308282148296ad7d0df4dd7a + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/googletest-release-${LIB_VERSION}/googletest +} + +function mason_compile { + cd "${MASON_BUILD_PATH}" + + mkdir -p ${MASON_PREFIX}/lib + mkdir -p ${MASON_PREFIX}/include/gtest + cp -rv include ${MASON_PREFIX} + + rm -rf build + mkdir -p build + cd build + + cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} \ + -DCMAKE_CXX_FLAGS="${CXXFLAGS:-} -D_GLIBCXX_USE_CXX11_ABI=1" \ + .. + make VERBOSE=1 -j${MASON_CONCURRENCY} gtest + cp -v libgtest.a ${MASON_PREFIX}/lib +} + +function mason_cflags { + echo -isystem ${MASON_PREFIX}/include -I${MASON_PREFIX}/include +} + +function mason_ldflags { + echo -lpthread +} + +function mason_static_libs { + echo ${MASON_PREFIX}/lib/libgtest.a +} + +mason_run "$@" diff --git a/scripts/gtest/1.8.0_1/.travis.yml b/scripts/gtest/1.8.0_1/.travis.yml new file mode 100644 index 000000000..42bf890ec --- /dev/null +++ b/scripts/gtest/1.8.0_1/.travis.yml @@ -0,0 +1,45 @@ +language: cpp + +sudo: false + +compiler: clang + +addons: + apt: + update: true + sources: [ 'george-edison55-precise-backports' ] + packages: [ 'cmake', 'cmake-data' ] + +matrix: + exclude: + - os: linux + include: + - os: osx + osx_image: xcode9.3 + env: MASON_PLATFORM=ios + - os: osx + osx_image: xcode9.3 + env: MASON_PLATFORM=osx + - os: linux + dist: trusty + env: MASON_PLATFORM=linux + - os: linux + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v5 + - os: linux + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v7 + - os: linux + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v8 + - os: linux + env: MASON_PLATFORM=android MASON_ANDROID_ABI=x86 + - os: linux + env: MASON_PLATFORM=android MASON_ANDROID_ABI=x86-64 + - os: linux + env: 
MASON_PLATFORM=android MASON_ANDROID_ABI=mips + - os: linux + env: MASON_PLATFORM=android MASON_ANDROID_ABI=mips-64 + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/gtest/1.8.0_1/script.sh b/scripts/gtest/1.8.0_1/script.sh new file mode 100755 index 000000000..4d00eb160 --- /dev/null +++ b/scripts/gtest/1.8.0_1/script.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +REVISION=_1 +LIB_VERSION=1.8.0 + +MASON_NAME=gtest +MASON_VERSION=${LIB_VERSION}${REVISION} +MASON_LIB_FILE=lib/libgtest.a + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/google/googletest/archive/release-${LIB_VERSION}.tar.gz \ + a40df33faad4a1fb308282148296ad7d0df4dd7a + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/googletest-release-${LIB_VERSION}/googletest +} + +function mason_build { + mason_load_source + mason_step "Building for Platform '${MASON_PLATFORM}/${MASON_PLATFORM_VERSION}'..." + cd "${MASON_BUILD_PATH}" + + mkdir -p ${MASON_PREFIX}/lib + mkdir -p ${MASON_PREFIX}/include/gtest + cp -rv include ${MASON_PREFIX} + + rm -rf build + mkdir -p build + cd build + if [ ${MASON_PLATFORM} = 'ios' ]; then + cmake \ + -GXcode \ + -DCMAKE_TOOLCHAIN_FILE=${MASON_DIR}/utils/ios.cmake \ + .. + xcodebuild -configuration Release -sdk iphoneos + xcodebuild -configuration Release -sdk iphonesimulator + + mason_substep "Creating Universal Binary..." 
+ LIB_FOLDERS="Release-iphoneos Release-iphonesimulator" + mkdir -p ${MASON_PREFIX}/lib + for LIB in $(find ${LIB_FOLDERS} -name "*.a" | xargs basename | sort | uniq) ; do + lipo -create $(find ${LIB_FOLDERS} -name "${LIB}") -output ${MASON_PREFIX}/lib/${LIB} + lipo -info ${MASON_PREFIX}/lib/${LIB} + done + elif [ ${MASON_PLATFORM} = 'android' ]; then + ${MASON_DIR}/utils/android.sh > toolchain.cmake + cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} \ + -DCMAKE_TOOLCHAIN_FILE=toolchain.cmake \ + .. + make VERBOSE=1 -j${MASON_CONCURRENCY} gtest + cp -v libgtest.a ${MASON_PREFIX}/lib + else + cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} \ + .. + make VERBOSE=1 -j${MASON_CONCURRENCY} gtest + cp -v libgtest.a ${MASON_PREFIX}/lib + fi +} + +function mason_cflags { + echo -isystem ${MASON_PREFIX}/include -I${MASON_PREFIX}/include +} + +function mason_ldflags { + if [ ${MASON_PLATFORM} != 'android' ]; then + echo -lpthread + fi +} + +function mason_static_libs { + echo ${MASON_PREFIX}/lib/libgtest.a +} + + +mason_run "$@" diff --git a/scripts/gzip-hpp/0.1.0/.travis.yml b/scripts/gzip-hpp/0.1.0/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/gzip-hpp/0.1.0/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/gzip-hpp/0.1.0/script.sh b/scripts/gzip-hpp/0.1.0/script.sh new file mode 100644 index 000000000..49206b3ca --- /dev/null +++ b/scripts/gzip-hpp/0.1.0/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=gzip-hpp +MASON_VERSION=0.1.0 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/gzip-hpp/archive/v${MASON_VERSION}.tar.gz \ + 7bd14b3b9f63a05a7a09264cdda93c741666e835 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/gzip-hpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/gzip ${MASON_PREFIX}/include/gzip +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/gzip-hpp/a4cfa6a638de351d26834cf2fea373693cdaa927/.travis.yml b/scripts/gzip-hpp/a4cfa6a638de351d26834cf2fea373693cdaa927/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/gzip-hpp/a4cfa6a638de351d26834cf2fea373693cdaa927/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/gzip/a4cfa6a638de351d26834cf2fea373693cdaa927/script.sh b/scripts/gzip-hpp/a4cfa6a638de351d26834cf2fea373693cdaa927/script.sh similarity index 100% rename from scripts/gzip/a4cfa6a638de351d26834cf2fea373693cdaa927/script.sh rename to scripts/gzip-hpp/a4cfa6a638de351d26834cf2fea373693cdaa927/script.sh diff --git a/scripts/gzip-hpp/bb80aac/.travis.yml b/scripts/gzip-hpp/bb80aac/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/gzip-hpp/bb80aac/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/gzip/bb80aac/script.sh b/scripts/gzip-hpp/bb80aac/script.sh similarity index 100% rename from scripts/gzip/bb80aac/script.sh rename to scripts/gzip-hpp/bb80aac/script.sh diff --git 
a/scripts/harfbuzz/0.9.40/script.sh b/scripts/harfbuzz/0.9.40/script.sh index 72eef56c7..187a751bc 100755 --- a/scripts/harfbuzz/0.9.40/script.sh +++ b/scripts/harfbuzz/0.9.40/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/harfbuzz.pc function mason_load_source { mason_download \ - http://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION}.tar.bz2 \ + https://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION}.tar.bz2 \ a685da85d38c37fd27603165642fc09feb7ae7c1 mason_extract_tar_bz2 diff --git a/scripts/harfbuzz/0.9.41/script.sh b/scripts/harfbuzz/0.9.41/script.sh index 08bc1d53b..2d81fb479 100755 --- a/scripts/harfbuzz/0.9.41/script.sh +++ b/scripts/harfbuzz/0.9.41/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/harfbuzz.pc function mason_load_source { mason_download \ - http://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION}.tar.bz2 \ + https://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION}.tar.bz2 \ a7d4c722f7d663dfa51503c0c857046b86495a69 mason_extract_tar_bz2 diff --git a/scripts/harfbuzz/1.1.2/script.sh b/scripts/harfbuzz/1.1.2/script.sh index 6fbfa6e94..aac870154 100755 --- a/scripts/harfbuzz/1.1.2/script.sh +++ b/scripts/harfbuzz/1.1.2/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/harfbuzz.pc function mason_load_source { mason_download \ - http://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION}.tar.bz2 \ + https://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION}.tar.bz2 \ bcd27708cca5f47c11dba7d2030f33af3ae4f0cf mason_extract_tar_bz2 diff --git a/scripts/harfbuzz/1.2.1/script.sh b/scripts/harfbuzz/1.2.1/script.sh index 68343dde2..602b52db5 100755 --- a/scripts/harfbuzz/1.2.1/script.sh +++ b/scripts/harfbuzz/1.2.1/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/harfbuzz.pc function mason_load_source { mason_download \ - 
http://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION}.tar.bz2 \ + https://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION}.tar.bz2 \ 977370efcc118ddd14a9553d9a608b2619b2f786 mason_extract_tar_bz2 diff --git a/scripts/harfbuzz/1.2.6/script.sh b/scripts/harfbuzz/1.2.6/script.sh index be14fe854..992d85583 100755 --- a/scripts/harfbuzz/1.2.6/script.sh +++ b/scripts/harfbuzz/1.2.6/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/harfbuzz.pc function mason_load_source { mason_download \ - http://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION}.tar.bz2 \ + https://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION}.tar.bz2 \ e8c05c3e91603b7f0de9607e66475fdaa4c02970 mason_extract_tar_bz2 diff --git a/scripts/harfbuzz/1.3.0/script.sh b/scripts/harfbuzz/1.3.0/script.sh index d10422b88..5bf753314 100755 --- a/scripts/harfbuzz/1.3.0/script.sh +++ b/scripts/harfbuzz/1.3.0/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/harfbuzz.pc function mason_load_source { mason_download \ - http://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION}.tar.bz2 \ + https://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION}.tar.bz2 \ f5674500c67484caa2c9936270d0a100e52f56f0 mason_extract_tar_bz2 diff --git a/scripts/harfbuzz/1.4.2-ft/script.sh b/scripts/harfbuzz/1.4.2-ft/script.sh index 9de4b9fc9..f49b00cc2 100755 --- a/scripts/harfbuzz/1.4.2-ft/script.sh +++ b/scripts/harfbuzz/1.4.2-ft/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/harfbuzz.pc function mason_load_source { mason_download \ - http://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION/-ft/}.tar.bz2 \ + https://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION/-ft/}.tar.bz2 \ d8b08c8d792500f414472c8a54f69b08aabb06b4 mason_extract_tar_bz2 diff --git a/scripts/harfbuzz/1.4.2/script.sh b/scripts/harfbuzz/1.4.2/script.sh index 
7efacc5d9..d7e036f6c 100755 --- a/scripts/harfbuzz/1.4.2/script.sh +++ b/scripts/harfbuzz/1.4.2/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/harfbuzz.pc function mason_load_source { mason_download \ - http://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION}.tar.bz2 \ + https://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION}.tar.bz2 \ d8b08c8d792500f414472c8a54f69b08aabb06b4 mason_extract_tar_bz2 diff --git a/scripts/harfbuzz/1.4.4-ft/script.sh b/scripts/harfbuzz/1.4.4-ft/script.sh index d38df1d22..dcc16c088 100755 --- a/scripts/harfbuzz/1.4.4-ft/script.sh +++ b/scripts/harfbuzz/1.4.4-ft/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/harfbuzz.pc function mason_load_source { mason_download \ - http://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION/-ft/}.tar.bz2 \ + https://www.freedesktop.org/software/harfbuzz/release/harfbuzz-${MASON_VERSION/-ft/}.tar.bz2 \ 276e4b050f8488903a1ced53fd9a303073d47f06 mason_extract_tar_bz2 @@ -21,8 +21,12 @@ function mason_prepare_compile { FREETYPE_VERSION="2.7.1" ${MASON_DIR}/mason install freetype ${FREETYPE_VERSION} MASON_FREETYPE=$(${MASON_DIR}/mason prefix freetype ${FREETYPE_VERSION}) - MASON_PLATFORM= ${MASON_DIR}/mason install ragel 6.9 - export PATH=$(MASON_PLATFORM= ${MASON_DIR}/mason prefix ragel 6.9)/bin:$PATH + + pushd ${MASON_ROOT}/.. 
+ env -i HOME="$HOME" PATH="$PATH" USER="$USER" ${MASON_DIR}/mason build ragel 6.9 + popd + +export PATH=$(MASON_PLATFORM= ${MASON_DIR}/mason prefix ragel 6.9)/bin:$PATH export PKG_CONFIG_PATH="$(${MASON_DIR}/mason prefix freetype ${FREETYPE_VERSION})/lib/pkgconfig":${PKG_CONFIG_PATH:-} export C_INCLUDE_PATH="${MASON_FREETYPE}/include/freetype2" export CPLUS_INCLUDE_PATH="${MASON_FREETYPE}/include/freetype2" diff --git a/scripts/icu/54.1/script.sh b/scripts/icu/54.1/script.sh index 4620dd5ee..05e708aa4 100755 --- a/scripts/icu/54.1/script.sh +++ b/scripts/icu/54.1/script.sh @@ -9,7 +9,7 @@ MASON_LIB_FILE=lib/libicuuc.a function mason_load_source { mason_download \ - http://download.icu-project.org/files/icu4c/54.1/icu4c-54_1-src.tgz \ + https://download.icu-project.org/files/icu4c/54.1/icu4c-54_1-src.tgz \ d0f79be346f75862ccef8fd641e429d9c129ac14 mason_extract_tar_gz @@ -19,7 +19,7 @@ function mason_load_source { function mason_compile { # note: -DUCONFIG_NO_BREAK_ITERATION=1 is desired by mapnik (for toTitle) - # http://www.icu-project.org/apiref/icu4c/uconfig_8h_source.html + # https://www.icu-project.org/apiref/icu4c/uconfig_8h_source.html export ICU_CORE_CPP_FLAGS="-DU_CHARSET_IS_UTF8=1" # disabled due to breakage with node-mapnik on OS X: https://github.com/mapnik/mapnik-packaging/issues/98 # -DU_USING_ICU_NAMESPACE=0 -DU_STATIC_IMPLEMENTATION=1 -DU_TIMEZONE=0 -DUCONFIG_NO_LEGACY_CONVERSION=1 -DUCONFIG_NO_FORMATTING=1 -DUCONFIG_NO_TRANSLITERATION=1 -DUCONFIG_NO_REGULAR_EXPRESSIONS=1" diff --git a/scripts/icu/55.1/script.sh b/scripts/icu/55.1/script.sh index a161f9917..c0f19a9fc 100755 --- a/scripts/icu/55.1/script.sh +++ b/scripts/icu/55.1/script.sh @@ -9,7 +9,7 @@ MASON_LIB_FILE=lib/libicuuc.a function mason_load_source { mason_download \ - http://download.icu-project.org/files/icu4c/55.1/icu4c-55_1-src.tgz \ + https://download.icu-project.org/files/icu4c/55.1/icu4c-55_1-src.tgz \ 0b38bcdde97971917f0039eeeb5d070ed29e5ad7 mason_extract_tar_gz @@ -19,7 +19,7 @@ 
function mason_load_source { function mason_compile { # note: -DUCONFIG_NO_BREAK_ITERATION=1 is desired by mapnik (for toTitle) - # http://www.icu-project.org/apiref/icu4c/uconfig_8h_source.html + # https://www.icu-project.org/apiref/icu4c/uconfig_8h_source.html export ICU_CORE_CPP_FLAGS="-DU_CHARSET_IS_UTF8=1" # disabled due to breakage with node-mapnik on OS X: https://github.com/mapnik/mapnik-packaging/issues/98 # -DU_USING_ICU_NAMESPACE=0 -DU_STATIC_IMPLEMENTATION=1 -DU_TIMEZONE=0 -DUCONFIG_NO_LEGACY_CONVERSION=1 -DUCONFIG_NO_FORMATTING=1 -DUCONFIG_NO_TRANSLITERATION=1 -DUCONFIG_NO_REGULAR_EXPRESSIONS=1" diff --git a/scripts/icu/57.1/script.sh b/scripts/icu/57.1/script.sh index a44325f7e..858ecd30d 100755 --- a/scripts/icu/57.1/script.sh +++ b/scripts/icu/57.1/script.sh @@ -14,7 +14,7 @@ MASON_CROSS_BUILD=0 function mason_load_source { mason_download \ - http://download.icu-project.org/files/icu4c/57.1/icu4c-57_1-src.tgz \ + https://download.icu-project.org/files/icu4c/57.1/icu4c-57_1-src.tgz \ c40f6ec922e10a50812157eae28969c528982196 mason_extract_tar_gz diff --git a/scripts/icu/58.1-brkitr/.travis.yml b/scripts/icu/58.1-brkitr/.travis.yml new file mode 100644 index 000000000..370fbba2b --- /dev/null +++ b/scripts/icu/58.1-brkitr/.travis.yml @@ -0,0 +1,41 @@ +language: generic + +matrix: + include: + - os: osx + compiler: clang + sudo: false + - os: osx + env: MASON_PLATFORM=ios + compiler: clang + - os: linux + env: MASON_PLATFORM_VERSION=cortex_a9 + - os: linux + env: MASON_PLATFORM_VERSION=i686 + - os: linux + compiler: clang + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v5 + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v7 + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v8 + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=x86 + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=x86-64 + - 
os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=mips + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=mips-64 + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/icu/58.1-brkitr/script.sh b/scripts/icu/58.1-brkitr/script.sh new file mode 100755 index 000000000..aa13012ce --- /dev/null +++ b/scripts/icu/58.1-brkitr/script.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash + +# Build ICU common package (libicuuc.a) with data file separate and with support for legacy conversion and break iteration turned off in order to minimize size + +MASON_NAME=icu +MASON_VERSION=58.1-brkitr +MASON_LIB_FILE=lib/libicuuc.a +#MASON_PKGCONFIG_FILE=lib/pkgconfig/icu-uc.pc + +. ${MASON_DIR}/mason.sh + +MASON_BUILD_DEBUG=0 # Enable to build library with debug symbols +MASON_CROSS_BUILD=0 + +function mason_load_source { + mason_download \ + https://download.icu-project.org/files/icu4c/58.1/icu4c-58_1-src.tgz \ + ad6995ba349ed79dde0f25d125a9b0bb56979420 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME} +} + +function mason_prepare_compile { + if [[ ${MASON_PLATFORM} == 'ios' || ${MASON_PLATFORM} == 'android' || ${MASON_PLATFORM_VERSION} != `uname -m` ]]; then + mason_substep "Cross-compiling ICU. Starting with host build of ICU to generate tools." + + pushd ${MASON_ROOT}/.. + env -i HOME="$HOME" PATH="$PATH" USER="$USER" ${MASON_DIR}/mason build icu ${MASON_VERSION} + popd + + # TODO: Copies a bunch of files to a kind of orphaned place, do we need to do something to clean up after the build? + # Copying the whole build directory is the easiest way to do a cross build, but we could limit this to a small subset of files (icucross.mk, the tools directory, probably a few others...) 
+ # Also instead of using the regular build steps, we could use a dedicated built target that just builds the tools + mason_substep "Moving host ICU build directory to ${MASON_ROOT}/.build/icu-host" + rm -rf ${MASON_ROOT}/.build/icu-host + cp -R ${MASON_BUILD_PATH}/source ${MASON_ROOT}/.build/icu-host + fi +} + +function mason_compile { + if [[ ${MASON_PLATFORM} == 'ios' || ${MASON_PLATFORM} == 'android' || ${MASON_PLATFORM_VERSION} != `uname -m` ]]; then + MASON_CROSS_BUILD=1 + fi + mason_compile_base +} + +function mason_compile_base { + pushd ${MASON_BUILD_PATH}/source + + # Using uint_least16_t instead of char16_t because Android Clang doesn't recognize char16_t + # I'm being shady and telling users of the library to use char16_t, so there's an implicit raw cast + ICU_CORE_CPP_FLAGS="-DU_CHARSET_IS_UTF8=1 -DU_CHAR_TYPE=uint_least16_t" + ICU_MODULE_CPP_FLAGS="${ICU_CORE_CPP_FLAGS} -DUCONFIG_NO_LEGACY_CONVERSION=1" # -DUCONFIG_NO_BREAK_ITERATION=1" + + CPPFLAGS="${CPPFLAGS:-} ${ICU_CORE_CPP_FLAGS} ${ICU_MODULE_CPP_FLAGS} -fvisibility=hidden $(icu_debug_cpp)" + #CXXFLAGS="--std=c++0x" + echo "Configuring with ${MASON_HOST_ARG}" + + ./configure ${MASON_HOST_ARG} --prefix=${MASON_PREFIX} \ + $(icu_debug_configure) \ + $(cross_build_configure) \ + --with-data-packaging=archive \ + --enable-renaming \ + --enable-strict \ + --enable-static \ + --enable-draft \ + --disable-rpath \ + --disable-shared \ + --disable-tests \ + --disable-extras \ + --disable-tracing \ + --disable-layout \ + --disable-icuio \ + --disable-samples \ + --disable-dyload || cat config.log + + + # Must do make clean after configure to clear out object files left over from previous build on different architecture + make clean + make -j${MASON_CONCURRENCY} + make install + popd +} + +function icu_debug_cpp { + if [ ${MASON_BUILD_DEBUG} ]; then + echo "-glldb" + fi +} + +function icu_debug_configure { + if [ ${MASON_BUILD_DEBUG} == 1 ]; then + echo "--enable-debug --disable-release" + else + echo 
"--enable-release --disable-debug" + fi +} + +function cross_build_configure { + # Building tools is disabled in cross-build mode. Using the host-built version of the tools is the whole point of the --with-cross-build flag + if [ ${MASON_CROSS_BUILD} == 1 ]; then + echo "--with-cross-build=${MASON_ROOT}/.build/icu-host --disable-tools" + else + echo "--enable-tools" + fi +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include -DUCHAR_TYPE=char16_t" +} + +function mason_ldflags { + echo "" +} + +mason_run "$@" diff --git a/scripts/icu/58.1-min-size/script.sh b/scripts/icu/58.1-min-size/script.sh index 7102b5e64..e865dba4d 100644 --- a/scripts/icu/58.1-min-size/script.sh +++ b/scripts/icu/58.1-min-size/script.sh @@ -14,7 +14,7 @@ MASON_CROSS_BUILD=0 function mason_load_source { mason_download \ - http://download.icu-project.org/files/icu4c/58.1/icu4c-58_1-src.tgz \ + https://download.icu-project.org/files/icu4c/58.1/icu4c-58_1-src.tgz \ ad6995ba349ed79dde0f25d125a9b0bb56979420 mason_extract_tar_gz diff --git a/scripts/icu/58.1/script.sh b/scripts/icu/58.1/script.sh index ef2c38df6..0668ae097 100755 --- a/scripts/icu/58.1/script.sh +++ b/scripts/icu/58.1/script.sh @@ -14,7 +14,7 @@ MASON_CROSS_BUILD=0 function mason_load_source { mason_download \ - http://download.icu-project.org/files/icu4c/58.1/icu4c-58_1-src.tgz \ + https://download.icu-project.org/files/icu4c/58.1/icu4c-58_1-src.tgz \ ad6995ba349ed79dde0f25d125a9b0bb56979420 mason_extract_tar_gz diff --git a/scripts/icu/63.1-min-static-data-1/.travis.yml b/scripts/icu/63.1-min-static-data-1/.travis.yml new file mode 100644 index 000000000..370fbba2b --- /dev/null +++ b/scripts/icu/63.1-min-static-data-1/.travis.yml @@ -0,0 +1,41 @@ +language: generic + +matrix: + include: + - os: osx + compiler: clang + sudo: false + - os: osx + env: MASON_PLATFORM=ios + compiler: clang + - os: linux + env: MASON_PLATFORM_VERSION=cortex_a9 + - os: linux + env: MASON_PLATFORM_VERSION=i686 + - os: linux + compiler: clang + 
sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v5 + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v7 + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v8 + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=x86 + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=x86-64 + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=mips + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=mips-64 + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/icu/63.1-min-static-data-1/script.sh b/scripts/icu/63.1-min-static-data-1/script.sh new file mode 100644 index 000000000..8cbc18f17 --- /dev/null +++ b/scripts/icu/63.1-min-static-data-1/script.sh @@ -0,0 +1,150 @@ +#!/usr/bin/env bash + +# Build ICU common package (libicuuc.a) with data file separate and with support for legacy conversion and break iteration turned off in order to minimize size + +MASON_NAME=icu +MASON_VERSION=63.1-min-static-data-1 +MASON_LIB_FILE=lib/libicuuc.a +#MASON_PKGCONFIG_FILE=lib/pkgconfig/icu-uc.pc + +. ${MASON_DIR}/mason.sh + +MASON_BUILD_DEBUG=0 # Enable to build library with debug symbols +MASON_CROSS_BUILD=0 + +function mason_load_source { + # you can't get the data and the code together in one place except when you clone the repo + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} + if [[ ! -d ${MASON_BUILD_PATH} ]]; then + git clone https://github.com/unicode-org/icu --depth=1 --branch release-63-1 ${MASON_BUILD_PATH} + fi +} + +function mason_prepare_compile { + if [[ ${MASON_PLATFORM} == 'ios' || ${MASON_PLATFORM} == 'android' || ${MASON_PLATFORM_VERSION} != `uname -m` ]]; then + mason_substep "Cross-compiling ICU. Starting with host build of ICU to generate tools." + + pushd ${MASON_ROOT}/.. 
+ env -i HOME="$HOME" PATH="$PATH" USER="$USER" ${MASON_DIR}/mason build ${MASON_NAME} ${MASON_VERSION} + popd + + # TODO: Copies a bunch of files to a kind of orphaned place, do we need to do something to clean up after the build? + # Copying the whole build directory is the easiest way to do a cross build, but we could limit this to a small subset of files (icucross.mk, the tools directory, probably a few others...) + # Also instead of using the regular build steps, we could use a dedicated built target that just builds the tools + mason_substep "Moving host ICU build directory to ${MASON_ROOT}/.build/icu-host" + rm -rf ${MASON_ROOT}/.build/icu-host + cp -R ${MASON_BUILD_PATH}/icu4c/source ${MASON_ROOT}/.build/icu-host + fi +} + +function trim_data { + mason_substep "Trimming ICU data to small number of locals and datasets" + + # for some reason there is also a dat file in there! + rm -f data/in/icudt63l.dat + + # move all non algorithmic code point mappings so they arent used + for f in $(find data/mappings -name '*.mk'); do + mv $f ${f}_ + done + + # make local versions of each data set + for f in $(find data -path mappings -prune -o -name '*files.mk'); do + # make a local version and remove the line continuations + l=$(echo $f | sed -e "s/\(.*\)files.mk/\1local.mk/g") + sed -e :a -e '/\\$/N; s/\\\n//; ta' $f > $l + + # if its locale, unit or currency we use our supported lang list + if [ "$(echo $l | grep -cE 'locales|unit|curr')" -eq 1 ]; then + sed -i'' -e '/^#/!s/SOURCE.*=.*txt/SOURCE = da.txt de.txt en.txt eo.txt es.txt fi.txt fr.txt he.txt id.txt it.txt ko.txt my.txt nl.txt pl.txt pt.txt pt_PT.txt ro.txt ru.txt sv.txt tr.txt uk.txt vi.txt zh.txt zh_Hans.txt/g' $l + # if its misc we need a couple of things + elif [ $(echo $l | grep -cF "misc") -eq 1 ]; then + sed -i'' -e '/^#/!s/SOURCE.*=.*txt/SOURCE = plurals.txt numberingSystems.txt icuver.txt icustd.txt pluralRanges.txt/g' $l + # otherwise the scuttle the whole thing + else + sed -i'' -e 
'/^#/!s/SOURCE.*=.*txt/SOURCE = /g' $l + fi + done +} + +function mason_compile { + if [[ ${MASON_PLATFORM} == 'ios' || ${MASON_PLATFORM} == 'android' || ${MASON_PLATFORM_VERSION} != `uname -m` ]]; then + MASON_CROSS_BUILD=1 + fi + mason_compile_base +} + +function mason_compile_base { + pushd ${MASON_BUILD_PATH}/icu4c/source + + # trim out a bunch of the data so that the data static library is as small as possible + trim_data + + # Using uint_least16_t instead of char16_t because Android Clang doesn't recognize char16_t + # I'm being shady and telling users of the library to use char16_t, so there's an implicit raw cast + ICU_CORE_CPP_FLAGS="-DU_CHARSET_IS_UTF8=1" + ICU_MODULE_CPP_FLAGS="${ICU_CORE_CPP_FLAGS} -DUCONFIG_NO_LEGACY_CONVERSION=1 -DUCONFIG_NO_BREAK_ITERATION=1" + + CFLAGS="${CFLAGS:-} ${ICU_CORE_CPP_FLAGS} ${ICU_MODULE_CPP_FLAGS} -fvisibility=hidden $(icu_debug_cpp) -Os" + CXXFLAGS="${CXXFLAGS:-} ${ICU_CORE_CPP_FLAGS} ${ICU_MODULE_CPP_FLAGS} -fvisibility=hidden $(icu_debug_cpp) -Os" + + echo "Configuring with ${MASON_HOST_ARG}" + + ./configure ${MASON_HOST_ARG} --prefix=${MASON_PREFIX} \ + $(icu_debug_configure) \ + $(cross_build_configure) \ + --with-data-packaging=static \ + --enable-renaming \ + --enable-strict \ + --enable-static \ + --enable-draft \ + --disable-rpath \ + --disable-shared \ + --disable-tests \ + --disable-extras \ + --disable-tracing \ + --disable-layout \ + --disable-icuio \ + --disable-samples \ + --disable-dyload || cat config.log + + + # Must do make clean after configure to clear out object files left over from previous build on different architecture + make clean + make -j${MASON_CONCURRENCY} + make install + popd +} + +function icu_debug_cpp { + if [ ${MASON_BUILD_DEBUG} == 1 ]; then + echo "-g" + fi +} + +function icu_debug_configure { + if [ ${MASON_BUILD_DEBUG} == 1 ]; then + echo "--enable-debug --disable-release" + else + echo "--enable-release --disable-debug" + fi +} + +function cross_build_configure { + # Building 
tools is disabled in cross-build mode. Using the host-built version of the tools is the whole point of the --with-cross-build flag + if [ ${MASON_CROSS_BUILD} == 1 ]; then + echo "--with-cross-build=${MASON_ROOT}/.build/icu-host --disable-tools" + else + echo "--enable-tools" + fi +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + echo "" +} + +mason_run "$@" diff --git a/scripts/icu/63.1-min-static-data/.travis.yml b/scripts/icu/63.1-min-static-data/.travis.yml new file mode 100644 index 000000000..370fbba2b --- /dev/null +++ b/scripts/icu/63.1-min-static-data/.travis.yml @@ -0,0 +1,41 @@ +language: generic + +matrix: + include: + - os: osx + compiler: clang + sudo: false + - os: osx + env: MASON_PLATFORM=ios + compiler: clang + - os: linux + env: MASON_PLATFORM_VERSION=cortex_a9 + - os: linux + env: MASON_PLATFORM_VERSION=i686 + - os: linux + compiler: clang + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v5 + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v7 + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v8 + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=x86 + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=x86-64 + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=mips + - os: osx + env: MASON_PLATFORM=android MASON_ANDROID_ABI=mips-64 + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/icu/63.1-min-static-data/script.sh b/scripts/icu/63.1-min-static-data/script.sh new file mode 100644 index 000000000..1d2daa6d7 --- /dev/null +++ b/scripts/icu/63.1-min-static-data/script.sh @@ -0,0 +1,150 @@ +#!/usr/bin/env bash + +# Build ICU common package (libicuuc.a) with data file separate and with support for legacy conversion and break iteration turned off in order to 
minimize size + +MASON_NAME=icu +MASON_VERSION=63.1-min-static-data +MASON_LIB_FILE=lib/libicuuc.a +#MASON_PKGCONFIG_FILE=lib/pkgconfig/icu-uc.pc + +. ${MASON_DIR}/mason.sh + +MASON_BUILD_DEBUG=0 # Enable to build library with debug symbols +MASON_CROSS_BUILD=0 + +function mason_load_source { + # you can't get the data and the code together in one place except when you clone the repo + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} + if [[ ! -d ${MASON_BUILD_PATH} ]]; then + git clone https://github.com/unicode-org/icu --depth=1 --branch release-63-1 ${MASON_BUILD_PATH} + fi +} + +function mason_prepare_compile { + if [[ ${MASON_PLATFORM} == 'ios' || ${MASON_PLATFORM} == 'android' || ${MASON_PLATFORM_VERSION} != `uname -m` ]]; then + mason_substep "Cross-compiling ICU. Starting with host build of ICU to generate tools." + + pushd ${MASON_ROOT}/.. + env -i HOME="$HOME" PATH="$PATH" USER="$USER" ${MASON_DIR}/mason build ${MASON_NAME} ${MASON_VERSION} + popd + + # TODO: Copies a bunch of files to a kind of orphaned place, do we need to do something to clean up after the build? + # Copying the whole build directory is the easiest way to do a cross build, but we could limit this to a small subset of files (icucross.mk, the tools directory, probably a few others...) + # Also instead of using the regular build steps, we could use a dedicated built target that just builds the tools + mason_substep "Moving host ICU build directory to ${MASON_ROOT}/.build/icu-host" + rm -rf ${MASON_ROOT}/.build/icu-host + cp -R ${MASON_BUILD_PATH}/icu4c/source ${MASON_ROOT}/.build/icu-host + fi +} + +function trim_data { + mason_substep "Trimming ICU data to small number of locals and datasets" + + # for some reason there is also a dat file in there! 
+ rm -f data/in/icudt63l.dat + + # move all non algorithmic code point mappings so they arent used + for f in $(find data/mappings -name '*.mk'); do + mv $f ${f}_ + done + + # make local versions of each data set + for f in $(find data -path mappings -prune -o -name '*files.mk'); do + # make a local version and remove the line continuations + l=$(echo $f | sed -e "s/\(.*\)files.mk/\1local.mk/g") + sed -e :a -e '/\\$/N; s/\\\n//; ta' $f > $l + + # if its locale, unit or currency we use our supported lang list + if [ "$(echo $l | grep -cE 'locales|unit|curr')" -eq 1 ]; then + sed -i'' -e '/^#/!s/SOURCE.*=.*txt/SOURCE = da.txt de.txt en.txt eo.txt es.txt fi.txt fr.txt he.txt id.txt it.txt ko.txt my.txt nl.txt pl.txt pt.txt pt_PT.txt ro.txt ru.txt sv.txt tr.txt uk.txt vi.txt zh.txt zh_Hans.txt/g' $l + # if its misc we need a couple of things + elif [ $(echo $l | grep -cF "misc") -eq 1 ]; then + sed -i'' -e '/^#/!s/SOURCE.*=.*txt/SOURCE = numberingSystems.txt icuver.txt icustd.txt/g' $l + # otherwise the scuttle the whole thing + else + sed -i'' -e '/^#/!s/SOURCE.*=.*txt/SOURCE = /g' $l + fi + done +} + +function mason_compile { + if [[ ${MASON_PLATFORM} == 'ios' || ${MASON_PLATFORM} == 'android' || ${MASON_PLATFORM_VERSION} != `uname -m` ]]; then + MASON_CROSS_BUILD=1 + fi + mason_compile_base +} + +function mason_compile_base { + pushd ${MASON_BUILD_PATH}/icu4c/source + + # trim out a bunch of the data so that the data static library is as small as possible + trim_data + + # Using uint_least16_t instead of char16_t because Android Clang doesn't recognize char16_t + # I'm being shady and telling users of the library to use char16_t, so there's an implicit raw cast + ICU_CORE_CPP_FLAGS="-DU_CHARSET_IS_UTF8=1" + ICU_MODULE_CPP_FLAGS="${ICU_CORE_CPP_FLAGS} -DUCONFIG_NO_LEGACY_CONVERSION=1 -DUCONFIG_NO_BREAK_ITERATION=1" + + CFLAGS="${CFLAGS:-} ${ICU_CORE_CPP_FLAGS} ${ICU_MODULE_CPP_FLAGS} -fvisibility=hidden $(icu_debug_cpp) -Os" + CXXFLAGS="${CXXFLAGS:-} 
${ICU_CORE_CPP_FLAGS} ${ICU_MODULE_CPP_FLAGS} -fvisibility=hidden $(icu_debug_cpp) -Os" + + echo "Configuring with ${MASON_HOST_ARG}" + + ./configure ${MASON_HOST_ARG} --prefix=${MASON_PREFIX} \ + $(icu_debug_configure) \ + $(cross_build_configure) \ + --with-data-packaging=static \ + --enable-renaming \ + --enable-strict \ + --enable-static \ + --enable-draft \ + --disable-rpath \ + --disable-shared \ + --disable-tests \ + --disable-extras \ + --disable-tracing \ + --disable-layout \ + --disable-icuio \ + --disable-samples \ + --disable-dyload || cat config.log + + + # Must do make clean after configure to clear out object files left over from previous build on different architecture + make clean + make -j${MASON_CONCURRENCY} + make install + popd +} + +function icu_debug_cpp { + if [ ${MASON_BUILD_DEBUG} == 1 ]; then + echo "-g" + fi +} + +function icu_debug_configure { + if [ ${MASON_BUILD_DEBUG} == 1 ]; then + echo "--enable-debug --disable-release" + else + echo "--enable-release --disable-debug" + fi +} + +function cross_build_configure { + # Building tools is disabled in cross-build mode. 
Using the host-built version of the tools is the whole point of the --with-cross-build flag + if [ ${MASON_CROSS_BUILD} == 1 ]; then + echo "--with-cross-build=${MASON_ROOT}/.build/icu-host --disable-tools" + else + echo "--enable-tools" + fi +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + echo "" +} + +mason_run "$@" diff --git a/scripts/include-what-you-use/6.0.0/script.sh b/scripts/include-what-you-use/6.0.0/script.sh index 391259e3c..c21f18d8a 100755 --- a/scripts/include-what-you-use/6.0.0/script.sh +++ b/scripts/include-what-you-use/6.0.0/script.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + # dynamically determine the path to this package HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" diff --git a/scripts/include-what-you-use/6.0.1/.travis.yml b/scripts/include-what-you-use/6.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/include-what-you-use/6.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/include-what-you-use/6.0.1/script.sh b/scripts/include-what-you-use/6.0.1/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/include-what-you-use/6.0.1/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename 
$HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. ${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/include-what-you-use/7.0.0/script.sh b/scripts/include-what-you-use/7.0.0/script.sh index 391259e3c..c21f18d8a 100755 --- a/scripts/include-what-you-use/7.0.0/script.sh +++ b/scripts/include-what-you-use/7.0.0/script.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + # dynamically determine the path to this package HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" diff --git a/scripts/include-what-you-use/7.0.1/.travis.yml b/scripts/include-what-you-use/7.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/include-what-you-use/7.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/include-what-you-use/7.0.1/script.sh b/scripts/include-what-you-use/7.0.1/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/include-what-you-use/7.0.1/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/jemalloc/39b1b20/.travis.yml b/scripts/jemalloc/39b1b20/.travis.yml new file mode 100644 index 000000000..e5ec5ff23 --- /dev/null +++ b/scripts/jemalloc/39b1b20/.travis.yml @@ -0,0 +1,14 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + compiler: clang + - os: linux + compiler: clang + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/jemalloc/39b1b20/script.sh b/scripts/jemalloc/39b1b20/script.sh new file mode 100755 index 000000000..05154d034 --- /dev/null +++ b/scripts/jemalloc/39b1b20/script.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +MASON_NAME=jemalloc +MASON_VERSION=39b1b20 +MASON_LIB_FILE=lib/libjemalloc.${MASON_DYNLIB_SUFFIX} + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/jemalloc/jemalloc/tarball/${MASON_VERSION}/jemalloc-${MASON_VERSION} \ + 51b9ca5a7fd39562eeff4733dad1f536a39f414a + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/jemalloc-${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + # need to call autogen.sh when building from a gitsha only + ./autogen.sh + # oddly DNDEBUG is not automatically in the jemalloc flags, so we add it here for best perf/just to be safe (to 100% assert asserts are removed) + export CFLAGS="${CFLAGS:-} -DNDEBUG" + # note: as of jemalloc 4.5.0 CFLAGS no longer overwrites but appends. 
+ # so we don't mess with CFLAGS here like previous packages where we needed to manually re-add the jemalloc CFLAGS that were lost + + # note: the below malloc-conf changes are based on reading https://github.com/jemalloc/jemalloc/pull/1179/files + # and noting that fb defaults to background_thread:true: https://github.com/jemalloc/jemalloc/issues/1128#issuecomment-378439640 + ./configure --prefix=${MASON_PREFIX} --disable-stats \ + --with-malloc-conf=background_thread:true,abort_conf:true + make -j${MASON_CONCURRENCY} VERBOSE=1 install_lib +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/jemalloc/5.1.0/.travis.yml b/scripts/jemalloc/5.1.0/.travis.yml new file mode 100644 index 000000000..e5ec5ff23 --- /dev/null +++ b/scripts/jemalloc/5.1.0/.travis.yml @@ -0,0 +1,14 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + compiler: clang + - os: linux + compiler: clang + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/jemalloc/5.1.0/script.sh b/scripts/jemalloc/5.1.0/script.sh new file mode 100755 index 000000000..ee52799d6 --- /dev/null +++ b/scripts/jemalloc/5.1.0/script.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +MASON_NAME=jemalloc +MASON_VERSION=5.1.0 +MASON_LIB_FILE=lib/libjemalloc.${MASON_DYNLIB_SUFFIX} + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/jemalloc/jemalloc/archive/${MASON_VERSION}.tar.gz \ + 58a6dc72ed15b914148f063f537e030ca45b2c97 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + # need to call autogen.sh when building from a gitsha only + ./autogen.sh + # oddly DNDEBUG is not automatically in the jemalloc flags, so we add it here for best perf/just to be safe (to 100% ensure asserts are removed) + # note: as of jemalloc 4.5.0 CFLAGS no longer overwrites but appends. + # so we don't mess with CFLAGS here like previous packages where we needed to manually re-add the jemalloc CFLAGS that were lost + export CFLAGS="${CFLAGS:-} -DNDEBUG" + + # note: the below malloc-conf changes are based on reading https://github.com/jemalloc/jemalloc/pull/1179/files + # and noting that fb defaults to background_thread:true: https://github.com/jemalloc/jemalloc/issues/1128#issuecomment-378439640 + ./configure --prefix=${MASON_PREFIX} --disable-stats \ + --with-malloc-conf=background_thread:true,abort_conf:true + make -j${MASON_CONCURRENCY} VERBOSE=1 install_lib +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/jni.hpp/4.0.0/.travis.yml b/scripts/jni.hpp/4.0.0/.travis.yml new file mode 100644 index 000000000..6b281de6c --- /dev/null +++ b/scripts/jni.hpp/4.0.0/.travis.yml @@ -0,0 +1,10 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/jni.hpp/4.0.0/script.sh b/scripts/jni.hpp/4.0.0/script.sh new file mode 100755 index 000000000..3f19c33ee --- /dev/null +++ b/scripts/jni.hpp/4.0.0/script.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +MASON_NAME=jni.hpp +MASON_VERSION=4.0.0 
+MASON_HEADER_ONLY=true + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/jni.hpp/archive/v${MASON_VERSION}.tar.gz \ + b55c457d4f586fb35ba50b28c677736e0825c64a + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/jni.hpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX} + cp -vR include README.md LICENSE.txt ${MASON_PREFIX} +} + +function mason_cflags { + echo -isystem ${MASON_PREFIX}/include -I${MASON_PREFIX}/include +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" \ No newline at end of file diff --git a/scripts/jni.hpp/4.0.1/.travis.yml b/scripts/jni.hpp/4.0.1/.travis.yml new file mode 100644 index 000000000..6b281de6c --- /dev/null +++ b/scripts/jni.hpp/4.0.1/.travis.yml @@ -0,0 +1,10 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/jni.hpp/4.0.1/script.sh b/scripts/jni.hpp/4.0.1/script.sh new file mode 100755 index 000000000..f8635c5b2 --- /dev/null +++ b/scripts/jni.hpp/4.0.1/script.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +MASON_NAME=jni.hpp +MASON_VERSION=4.0.1 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/jni.hpp/archive/v${MASON_VERSION}.tar.gz \ + e1783013641e6d6424421a88aa53e821289cac46 + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/jni.hpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX} + cp -vR include README.md LICENSE.txt ${MASON_PREFIX} +} + +function mason_cflags { + echo -isystem ${MASON_PREFIX}/include -I${MASON_PREFIX}/include +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" \ No newline at end of file diff --git a/scripts/jpeg/v9a/script.sh b/scripts/jpeg/v9a/script.sh index 55a63965a..523f556d9 100755 --- a/scripts/jpeg/v9a/script.sh +++ b/scripts/jpeg/v9a/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libjpeg.a function mason_load_source { mason_download \ - http://www.ijg.org/files/jpegsrc.v9a.tar.gz \ + https://www.ijg.org/files/jpegsrc.v9a.tar.gz \ fc3b1eefda3d8a193f9f92a16a1b0c9f56304b6d mason_extract_tar_gz diff --git a/scripts/jpeg_turbo/1.4.0/script.sh b/scripts/jpeg_turbo/1.4.0/script.sh index e95146c62..b5d142815 100755 --- a/scripts/jpeg_turbo/1.4.0/script.sh +++ b/scripts/jpeg_turbo/1.4.0/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libjpeg.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/libjpeg-turbo/1.4.0/libjpeg-turbo-1.4.0.tar.gz \ + https://downloads.sourceforge.net/project/libjpeg-turbo/1.4.0/libjpeg-turbo-1.4.0.tar.gz \ 6ce52501e0be70b15cd062efeca8fa57faf84a16 mason_extract_tar_gz diff --git a/scripts/jpeg_turbo/1.4.2/script.sh b/scripts/jpeg_turbo/1.4.2/script.sh index 1e385a2c0..46c075df4 100755 --- a/scripts/jpeg_turbo/1.4.2/script.sh +++ b/scripts/jpeg_turbo/1.4.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libjpeg.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/libjpeg-turbo/${MASON_VERSION}/libjpeg-turbo-${MASON_VERSION}.tar.gz \ + 
https://downloads.sourceforge.net/project/libjpeg-turbo/${MASON_VERSION}/libjpeg-turbo-${MASON_VERSION}.tar.gz \ d4638b2261ac3c1c20a2a2e1f8e19fc1f11bf524 mason_extract_tar_gz diff --git a/scripts/jpeg_turbo/1.5.0/script.sh b/scripts/jpeg_turbo/1.5.0/script.sh index 842a7d8b6..429037928 100755 --- a/scripts/jpeg_turbo/1.5.0/script.sh +++ b/scripts/jpeg_turbo/1.5.0/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libjpeg.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/libjpeg-turbo/${MASON_VERSION}/libjpeg-turbo-${MASON_VERSION}.tar.gz \ + https://downloads.sourceforge.net/project/libjpeg-turbo/${MASON_VERSION}/libjpeg-turbo-${MASON_VERSION}.tar.gz \ b90a76db4d0628bde8381150e355a858e3ced923 mason_extract_tar_gz diff --git a/scripts/jpeg_turbo/1.5.1/script.sh b/scripts/jpeg_turbo/1.5.1/script.sh index 25ed7c4ae..6114dca0a 100755 --- a/scripts/jpeg_turbo/1.5.1/script.sh +++ b/scripts/jpeg_turbo/1.5.1/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libjpeg.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/libjpeg-turbo/${MASON_VERSION}/libjpeg-turbo-${MASON_VERSION}.tar.gz \ + https://downloads.sourceforge.net/project/libjpeg-turbo/${MASON_VERSION}/libjpeg-turbo-${MASON_VERSION}.tar.gz \ 4038bb4242a3fc3387d5dc4e37fc2ac7fffaf5da mason_extract_tar_gz diff --git a/scripts/jpeg_turbo/1.5.2/script.sh b/scripts/jpeg_turbo/1.5.2/script.sh index 44f24c8a1..6ae15d9b4 100755 --- a/scripts/jpeg_turbo/1.5.2/script.sh +++ b/scripts/jpeg_turbo/1.5.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libjpeg.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/libjpeg-turbo/${MASON_VERSION}/libjpeg-turbo-${MASON_VERSION}.tar.gz \ + https://downloads.sourceforge.net/project/libjpeg-turbo/${MASON_VERSION}/libjpeg-turbo-${MASON_VERSION}.tar.gz \ 7096fcfbfd37439d6ae652379e48abe692c11d65 mason_extract_tar_gz diff --git a/scripts/kdbush/0.1.3/.travis.yml 
b/scripts/kdbush/0.1.3/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/kdbush/0.1.3/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/kdbush/0.1.3/script.sh b/scripts/kdbush/0.1.3/script.sh new file mode 100644 index 000000000..2646cebcd --- /dev/null +++ b/scripts/kdbush/0.1.3/script.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +MASON_NAME=kdbush +MASON_VERSION=0.1.3 +MASON_HEADER_ONLY=true + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mourner/kdbush.hpp/archive/v0.1.3.tar.gz \ + b90b2c2664bf5ba2cdae31d532eb413ba6be68bc + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/kdbush.hpp-0.1.3 +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -v include/*.hpp ${MASON_PREFIX}/include + cp -v README.md LICENSE ${MASON_PREFIX} +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/lcov/1.12/script.sh b/scripts/lcov/1.12/script.sh index b7deca076..5eda1474f 100755 --- a/scripts/lcov/1.12/script.sh +++ b/scripts/lcov/1.12/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=usr/bin/lcov function mason_load_source { mason_download \ - http://downloads.sourceforge.net/ltp/lcov-1.12.tar.gz \ + https://downloads.sourceforge.net/ltp/lcov-1.12.tar.gz \ c7470ce9d89bb9c276ef7f461e9ab5b9c9935eff mason_extract_tar_gz diff --git a/scripts/libcurl/7.38.0-boringssl/script.sh b/scripts/libcurl/7.38.0-boringssl/script.sh index 06ad229c4..862a98841 100755 --- a/scripts/libcurl/7.38.0-boringssl/script.sh +++ b/scripts/libcurl/7.38.0-boringssl/script.sh @@ -12,7 +12,7 @@ MASON_PWD=`pwd` function mason_load_source { mason_download \ - http://curl.haxx.se/download/curl-7.38.0.tar.gz \ + 
https://curl.haxx.se/download/curl-7.38.0.tar.gz \ 5463f1b9dc807e4ae8be2ef4ed57e67f677f4426 mason_extract_tar_gz diff --git a/scripts/libcurl/7.40.0/script.sh b/scripts/libcurl/7.40.0/script.sh index 03d2adc1c..ec7da672b 100755 --- a/scripts/libcurl/7.40.0/script.sh +++ b/scripts/libcurl/7.40.0/script.sh @@ -10,7 +10,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libcurl.pc function mason_load_source { mason_download \ - http://curl.haxx.se/download/curl-7.40.0.tar.gz \ + https://curl.haxx.se/download/curl-7.40.0.tar.gz \ c7c97e02f5fa4302f4c25c72486359f7b46f7d6d mason_extract_tar_gz diff --git a/scripts/libcurl/7.45.0/script.sh b/scripts/libcurl/7.45.0/script.sh index 4d4f30180..310b81bf1 100755 --- a/scripts/libcurl/7.45.0/script.sh +++ b/scripts/libcurl/7.45.0/script.sh @@ -12,7 +12,7 @@ OPENSSL_VERSION=1.0.1p function mason_load_source { mason_download \ - http://curl.haxx.se/download/curl-${MASON_VERSION}.tar.gz \ + https://curl.haxx.se/download/curl-${MASON_VERSION}.tar.gz \ cf5b820a1ab30e49083784c46fe3ec9e6d2c84dc mason_extract_tar_gz diff --git a/scripts/libcurl/7.50.2/script.sh b/scripts/libcurl/7.50.2/script.sh index a0adff11c..a8c24f77c 100755 --- a/scripts/libcurl/7.50.2/script.sh +++ b/scripts/libcurl/7.50.2/script.sh @@ -12,7 +12,7 @@ OPENSSL_VERSION=1.0.2d function mason_load_source { mason_download \ - http://curl.haxx.se/download/curl-${MASON_VERSION}.tar.gz \ + https://curl.haxx.se/download/curl-${MASON_VERSION}.tar.gz \ 35d5c0d1dba88989961b3e95843c6b26a2d4fba8 mason_extract_tar_gz diff --git a/scripts/libdeflate/1.0/.travis.yml b/scripts/libdeflate/1.0/.travis.yml new file mode 100644 index 000000000..df03c6ef1 --- /dev/null +++ b/scripts/libdeflate/1.0/.travis.yml @@ -0,0 +1,14 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode9.2 + compiler: clang + - os: linux + compiler: clang + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git 
a/scripts/libdeflate/1.0/script.sh b/scripts/libdeflate/1.0/script.sh new file mode 100755 index 000000000..bfb9fb902 --- /dev/null +++ b/scripts/libdeflate/1.0/script.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +MASON_NAME=libdeflate +MASON_VERSION=1.0 +MASON_LIB_FILE=lib/libdeflate.a + +# Used when cross compiling to cortex_a9 +ZLIB_SHARED_VERSION=1.2.8 + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/ebiggers/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + 5255c4b15185451247032a29c480a198215384ec + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + # export CFLAGS="${CFLAGS:-} -O3 -DNDEBUG" + # export LDFLAGS="${CFLAGS:-}" + # we want -O3 for best performance + perl -i -p -e "s/-O2/-O3 -DNDEBUG/g;" Makefile + # note: -fomit-frame-pointer is in default flags for libdeflate + V=1 VERBOSE=1 make -j${MASON_CONCURRENCY} + mkdir -p ${MASON_PREFIX}/lib + cp libdeflate.a ${MASON_PREFIX}/lib/ + mkdir -p ${MASON_PREFIX}/include + cp libdeflate.h ${MASON_PREFIX}/include/ +} + +function mason_static_libs { + echo ${MASON_PREFIX}/${MASON_LIB_FILE} +} + +function mason_cflags { + echo -I${MASON_PREFIX}/include +} + +function mason_ldflags { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/libjpeg-turbo/1.4.2/script.sh b/scripts/libjpeg-turbo/1.4.2/script.sh index ab4294f04..d950f7977 100755 --- a/scripts/libjpeg-turbo/1.4.2/script.sh +++ b/scripts/libjpeg-turbo/1.4.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libjpeg.a function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/libjpeg-turbo/1.4.2/libjpeg-turbo-1.4.2.tar.gz \ + https://downloads.sourceforge.net/project/libjpeg-turbo/1.4.2/libjpeg-turbo-1.4.2.tar.gz \ d4638b2261ac3c1c20a2a2e1f8e19fc1f11bf524 mason_extract_tar_gz diff --git a/scripts/libosmium/2.14.0/.travis.yml b/scripts/libosmium/2.14.0/.travis.yml new file 
mode 100644 index 000000000..00c277c21 --- /dev/null +++ b/scripts/libosmium/2.14.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: linux + compiler: clang + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/libosmium/2.14.0/script.sh b/scripts/libosmium/2.14.0/script.sh new file mode 100755 index 000000000..0deb5cec4 --- /dev/null +++ b/scripts/libosmium/2.14.0/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=libosmium +MASON_VERSION=2.14.0 +MASON_HEADER_ONLY=true + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/osmcode/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + 79cade8fdf2162c00f376a299f73254013b8b48a + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/osmium ${MASON_PREFIX}/include/osmium +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/libosmium/2.14.1/.travis.yml b/scripts/libosmium/2.14.1/.travis.yml new file mode 100644 index 000000000..00c277c21 --- /dev/null +++ b/scripts/libosmium/2.14.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: linux + compiler: clang + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/libosmium/2.14.1/script.sh b/scripts/libosmium/2.14.1/script.sh new file mode 100755 index 000000000..8a5554099 --- /dev/null +++ b/scripts/libosmium/2.14.1/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=libosmium +MASON_VERSION=2.14.1 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/osmcode/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + 6ae81009dc37bbb23c6b99156c2b2775c875ba56 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/osmium ${MASON_PREFIX}/include/osmium +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/libosmium/2.14.2/.travis.yml b/scripts/libosmium/2.14.2/.travis.yml new file mode 100644 index 000000000..00c277c21 --- /dev/null +++ b/scripts/libosmium/2.14.2/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: linux + compiler: clang + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/libosmium/2.14.2/script.sh b/scripts/libosmium/2.14.2/script.sh new file mode 100755 index 000000000..5c83e357b --- /dev/null +++ b/scripts/libosmium/2.14.2/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=libosmium +MASON_VERSION=2.14.2 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/osmcode/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + 618e035bd7dce20487636323d40214177551d850 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/osmium ${MASON_PREFIX}/include/osmium +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/libosmium/2.15.0/.travis.yml b/scripts/libosmium/2.15.0/.travis.yml new file mode 100644 index 000000000..00c277c21 --- /dev/null +++ b/scripts/libosmium/2.15.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: linux + compiler: clang + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/libosmium/2.15.0/script.sh b/scripts/libosmium/2.15.0/script.sh new file mode 100755 index 000000000..8a7e0acb0 --- /dev/null +++ b/scripts/libosmium/2.15.0/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=libosmium +MASON_VERSION=2.15.0 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/osmcode/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + 1c6baea1fffc3cdc45f835d5248300a4439368ba + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/osmium ${MASON_PREFIX}/include/osmium +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/libosmium/2.15.1/.travis.yml b/scripts/libosmium/2.15.1/.travis.yml new file mode 100644 index 000000000..00c277c21 --- /dev/null +++ b/scripts/libosmium/2.15.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: linux + compiler: clang + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/libosmium/2.15.1/script.sh b/scripts/libosmium/2.15.1/script.sh new file mode 100755 index 000000000..11101c473 --- /dev/null +++ b/scripts/libosmium/2.15.1/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=libosmium +MASON_VERSION=2.15.1 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/osmcode/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + b73b2837906806f67c65045921070d3c6b0a9859 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/osmium ${MASON_PREFIX}/include/osmium +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/libosmium/2.15.2/.travis.yml b/scripts/libosmium/2.15.2/.travis.yml new file mode 100644 index 000000000..00c277c21 --- /dev/null +++ b/scripts/libosmium/2.15.2/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: linux + compiler: clang + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/libosmium/2.15.2/script.sh b/scripts/libosmium/2.15.2/script.sh new file mode 100755 index 000000000..29d748154 --- /dev/null +++ b/scripts/libosmium/2.15.2/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=libosmium +MASON_VERSION=2.15.2 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/osmcode/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + 883eb9eeac9a8c30efa3d1415ab2f89128b705be + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/osmium ${MASON_PREFIX}/include/osmium +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/libosmium/2.15.3/.travis.yml b/scripts/libosmium/2.15.3/.travis.yml new file mode 100644 index 000000000..00c277c21 --- /dev/null +++ b/scripts/libosmium/2.15.3/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: linux + compiler: clang + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/libosmium/2.15.3/script.sh b/scripts/libosmium/2.15.3/script.sh new file mode 100755 index 000000000..497a9789d --- /dev/null +++ b/scripts/libosmium/2.15.3/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=libosmium +MASON_VERSION=2.15.3 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/osmcode/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + fdce1f396b081383b026f3ab8285f14d2789c2e7 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/osmium ${MASON_PREFIX}/include/osmium +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/libosmium/2.15.5/.travis.yml b/scripts/libosmium/2.15.5/.travis.yml new file mode 100644 index 000000000..00c277c21 --- /dev/null +++ b/scripts/libosmium/2.15.5/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: linux + compiler: clang + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/libosmium/2.15.5/script.sh b/scripts/libosmium/2.15.5/script.sh new file mode 100755 index 000000000..5806b5c3a --- /dev/null +++ b/scripts/libosmium/2.15.5/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=libosmium +MASON_VERSION=2.15.5 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/osmcode/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + 63d503581be4cccbe01d516646c2bfd1a81fb832 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/osmium ${MASON_PREFIX}/include/osmium +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/libosmium/2.15.6/.travis.yml b/scripts/libosmium/2.15.6/.travis.yml new file mode 100644 index 000000000..00c277c21 --- /dev/null +++ b/scripts/libosmium/2.15.6/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: linux + compiler: clang + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/libosmium/2.15.6/script.sh b/scripts/libosmium/2.15.6/script.sh new file mode 100755 index 000000000..43c774eff --- /dev/null +++ b/scripts/libosmium/2.15.6/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=libosmium +MASON_VERSION=2.15.6 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/osmcode/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + 34c8407ec60515040d6c270e0051f573eb4a5d92 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/osmium ${MASON_PREFIX}/include/osmium +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/libpng/1.6.16/script.sh b/scripts/libpng/1.6.16/script.sh index 985ef42db..754d9b082 100755 --- a/scripts/libpng/1.6.16/script.sh +++ b/scripts/libpng/1.6.16/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libpng.pc function mason_load_source { mason_download \ - http://prdownloads.sourceforge.net/libpng/libpng-1.6.16.tar.gz?download \ + https://prdownloads.sourceforge.net/libpng/libpng-1.6.16.tar.gz?download \ b0449a7d05447842f3f19642c2104e0a57db13a8 mason_extract_tar_gz diff --git a/scripts/libpng/1.6.17/script.sh b/scripts/libpng/1.6.17/script.sh index ceafc44ee..4df246781 100755 --- a/scripts/libpng/1.6.17/script.sh +++ b/scripts/libpng/1.6.17/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libpng.pc function mason_load_source { mason_download \ - http://prdownloads.sourceforge.net/libpng/libpng-${MASON_VERSION}.tar.gz?download \ + https://prdownloads.sourceforge.net/libpng/libpng-${MASON_VERSION}.tar.gz?download \ ccc3b2243585a8aedf762fc72ffcc253aaed9298 mason_extract_tar_gz diff --git a/scripts/libpng/1.6.18/script.sh b/scripts/libpng/1.6.18/script.sh index 26a3c5b40..1454ce685 100755 --- a/scripts/libpng/1.6.18/script.sh +++ b/scripts/libpng/1.6.18/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libpng.pc function mason_load_source { mason_download \ - http://prdownloads.sourceforge.net/libpng/libpng-${MASON_VERSION}.tar.gz?download \ + 
https://prdownloads.sourceforge.net/libpng/libpng-${MASON_VERSION}.tar.gz?download \ d9ed998cee89dc6ea3427c17882b5cfe7882429a mason_extract_tar_gz diff --git a/scripts/libpng/1.6.20/script.sh b/scripts/libpng/1.6.20/script.sh index d59501016..f3e067202 100755 --- a/scripts/libpng/1.6.20/script.sh +++ b/scripts/libpng/1.6.20/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libpng.pc function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/libpng/libpng16/older-releases/${MASON_VERSION}/libpng-${MASON_VERSION}.tar.gz \ + https://downloads.sourceforge.net/project/libpng/libpng16/older-releases/${MASON_VERSION}/libpng-${MASON_VERSION}.tar.gz \ 0b5df1201ea4b63777a9c9c49ff26a45dd87890e mason_extract_tar_gz diff --git a/scripts/libpng/1.6.21/script.sh b/scripts/libpng/1.6.21/script.sh index 3f05c61b3..cecc83beb 100755 --- a/scripts/libpng/1.6.21/script.sh +++ b/scripts/libpng/1.6.21/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libpng.pc function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/libpng/libpng16/${MASON_VERSION}/libpng-${MASON_VERSION}.tar.gz \ + https://downloads.sourceforge.net/project/libpng/libpng16/${MASON_VERSION}/libpng-${MASON_VERSION}.tar.gz \ 1604e875b732b08ae81e155259422f1c1407255d mason_extract_tar_gz diff --git a/scripts/libpng/1.6.24/script.sh b/scripts/libpng/1.6.24/script.sh index db1417b1d..25dff0b9b 100755 --- a/scripts/libpng/1.6.24/script.sh +++ b/scripts/libpng/1.6.24/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libpng.pc function mason_load_source { mason_download \ - http://superb-sea2.dl.sourceforge.net/project/${MASON_NAME}/${MASON_NAME}16/${MASON_VERSION}/${MASON_NAME}-${MASON_VERSION}.tar.gz \ + https://superb-sea2.dl.sourceforge.net/project/${MASON_NAME}/${MASON_NAME}16/${MASON_VERSION}/${MASON_NAME}-${MASON_VERSION}.tar.gz \ ea724d11c25753ebad23ca0f63518b7f17c46a6a mason_extract_tar_gz diff --git 
a/scripts/libpng/1.6.25/script.sh b/scripts/libpng/1.6.25/script.sh index 6d100c543..aae243c1c 100755 --- a/scripts/libpng/1.6.25/script.sh +++ b/scripts/libpng/1.6.25/script.sh @@ -12,7 +12,7 @@ ZLIB_SHARED_VERSION=1.2.8 function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/libpng/libpng16/older-releases/${MASON_VERSION}/libpng-${MASON_VERSION}.tar.gz \ + https://downloads.sourceforge.net/project/libpng/libpng16/older-releases/${MASON_VERSION}/libpng-${MASON_VERSION}.tar.gz \ a88b710714a8e27e5e5aa52de28076860fc7748c mason_extract_tar_gz diff --git a/scripts/libpng/1.6.28/script.sh b/scripts/libpng/1.6.28/script.sh index 0f0b1e7ce..c16d22f25 100755 --- a/scripts/libpng/1.6.28/script.sh +++ b/scripts/libpng/1.6.28/script.sh @@ -12,7 +12,7 @@ ZLIB_SHARED_VERSION=1.2.8 function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/libpng/libpng16/older-releases/${MASON_VERSION}/libpng-${MASON_VERSION}.tar.gz \ + https://downloads.sourceforge.net/project/libpng/libpng16/older-releases/${MASON_VERSION}/libpng-${MASON_VERSION}.tar.gz \ a424121a192420e2fbbea20db1d13dca0c7c99ca mason_extract_tar_gz diff --git a/scripts/libpng/1.6.32/script.sh b/scripts/libpng/1.6.32/script.sh index c100298d4..499649226 100755 --- a/scripts/libpng/1.6.32/script.sh +++ b/scripts/libpng/1.6.32/script.sh @@ -12,7 +12,7 @@ ZLIB_SHARED_VERSION=1.2.8 function mason_load_source { mason_download \ - http://downloads.sourceforge.net/project/libpng/libpng16/${MASON_VERSION}/libpng-${MASON_VERSION}.tar.gz \ + https://downloads.sourceforge.net/project/libpng/libpng16/${MASON_VERSION}/libpng-${MASON_VERSION}.tar.gz \ 752b19285db1aab9d0b8f5ef2312390734f71e70 mason_extract_tar_gz diff --git a/scripts/libpq/10.3/.travis.yml b/scripts/libpq/10.3/.travis.yml new file mode 100644 index 000000000..8c71516f9 --- /dev/null +++ b/scripts/libpq/10.3/.travis.yml @@ -0,0 +1,13 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + 
compiler: clang + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/libpq/10.3/patch.diff b/scripts/libpq/10.3/patch.diff new file mode 100644 index 000000000..ae2f06a46 --- /dev/null +++ b/scripts/libpq/10.3/patch.diff @@ -0,0 +1,11 @@ +--- src/include/pg_config_manual.h 2013-10-07 20:17:38.000000000 -0700 ++++ src/include/pg_config_manual.h 2014-03-08 21:29:48.000000000 -0800 +@@ -144,7 +144,7 @@ + * here's where to twiddle it. You can also override this at runtime + * with the postmaster's -k switch. + */ +-#define DEFAULT_PGSOCKET_DIR "/tmp" ++#define DEFAULT_PGSOCKET_DIR "/var/run/postgresql" + + /* + * The random() function is expected to yield values between 0 and diff --git a/scripts/libpq/10.3/script.sh b/scripts/libpq/10.3/script.sh new file mode 100755 index 000000000..3a26de842 --- /dev/null +++ b/scripts/libpq/10.3/script.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash + +MASON_NAME=libpq +MASON_VERSION=10.3 +MASON_LIB_FILE=lib/libpq.a +MASON_PKGCONFIG_FILE=lib/pkgconfig/libpq.pc + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ + e1590a4b2167dcdf164eb887cf83e7da9e155771 + + mason_extract_tar_bz2 + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/postgresql-${MASON_VERSION} +} + +function mason_compile { + if [[ ${MASON_PLATFORM} == 'linux' ]]; then + mason_step "Loading patch" + patch src/include/pg_config_manual.h ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff + fi + + # note CFLAGS overrides defaults (-Wall -Wmissing-prototypes -Wpointer-arith -Wdeclaration-after-statement -Wendif-labels -Wmissing-format-attribute -Wformat-security -fno-strict-aliasing -fwrapv -Wno-unused-command-line-argument) so we need to add optimization flags back + export CFLAGS="${CFLAGS} -O3 -DNDEBUG -Wall -Wmissing-prototypes -Wpointer-arith -Wdeclaration-after-statement -Wendif-labels -Wmissing-format-attribute -Wformat-security -fno-strict-aliasing -fwrapv -Wno-unused-command-line-argument" + ./configure \ + --prefix=${MASON_PREFIX} \ + ${MASON_HOST_ARG} \ + --enable-thread-safety \ + --enable-largefile \ + --without-bonjour \ + --without-openssl \ + --without-pam \ + --without-krb5 \ + --without-gssapi \ + --without-ossp-uuid \ + --without-readline \ + --without-ldap \ + --without-zlib \ + --without-libxml \ + --without-libxslt \ + --without-selinux \ + --without-python \ + --without-perl \ + --without-tcl \ + --disable-rpath \ + --disable-debug \ + --disable-profiling \ + --disable-coverage \ + --disable-dtrace \ + --disable-depend \ + --disable-cassert + + make -j${MASON_CONCURRENCY} -C src/bin/pg_config install + make -j${MASON_CONCURRENCY} -C src/interfaces/libpq/ install + cp src/include/postgres_ext.h ${MASON_PREFIX}/include/ + cp src/include/pg_config_ext.h ${MASON_PREFIX}/include/ + rm -f ${MASON_PREFIX}/lib/libpq{*.so*,*.dylib} +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git 
a/scripts/libpq/9.4.0/script.sh b/scripts/libpq/9.4.0/script.sh index 8098cb726..c0abb02f0 100755 --- a/scripts/libpq/9.4.0/script.sh +++ b/scripts/libpq/9.4.0/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libpq.pc function mason_load_source { mason_download \ - http://ftp.postgresql.org/pub/source/v9.4.0/postgresql-9.4.0.tar.bz2 \ + https://ftp.postgresql.org/pub/source/v9.4.0/postgresql-9.4.0.tar.bz2 \ d1cf3f96059532a99445e34a15cf0ef67f8da9c7 mason_extract_tar_bz2 diff --git a/scripts/libpq/9.4.1/script.sh b/scripts/libpq/9.4.1/script.sh index 4704bf921..83f53c015 100755 --- a/scripts/libpq/9.4.1/script.sh +++ b/scripts/libpq/9.4.1/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libpq.pc function mason_load_source { mason_download \ - http://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ + https://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ 1fcc75dccdb9ffd3f30e723828cbfce00c9b13fd mason_extract_tar_bz2 diff --git a/scripts/libpq/9.5.2/script.sh b/scripts/libpq/9.5.2/script.sh index bd899a306..eeeaf1328 100755 --- a/scripts/libpq/9.5.2/script.sh +++ b/scripts/libpq/9.5.2/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libpq.pc function mason_load_source { mason_download \ - http://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ + https://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ 9c7bd5c1c601075ff6d5ea7615f9461d5b1f4c88 mason_extract_tar_bz2 diff --git a/scripts/libpq/9.6.1/script.sh b/scripts/libpq/9.6.1/script.sh index ce3bc4f33..489f7909e 100755 --- a/scripts/libpq/9.6.1/script.sh +++ b/scripts/libpq/9.6.1/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libpq.pc function mason_load_source { mason_download \ - http://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ + 
https://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ 6aef3fb521aaf987a9363a314ff7d5539b6601cd mason_extract_tar_bz2 diff --git a/scripts/libpq/9.6.2/script.sh b/scripts/libpq/9.6.2/script.sh index e0361dd44..dc8604d89 100755 --- a/scripts/libpq/9.6.2/script.sh +++ b/scripts/libpq/9.6.2/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libpq.pc function mason_load_source { mason_download \ - http://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ + https://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ 183f73527051430934a20bf08646b16373cddcca mason_extract_tar_bz2 diff --git a/scripts/libpq/9.6.5/script.sh b/scripts/libpq/9.6.5/script.sh index 5236c12e9..03496dc84 100755 --- a/scripts/libpq/9.6.5/script.sh +++ b/scripts/libpq/9.6.5/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libpq.pc function mason_load_source { mason_download \ - http://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ + https://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ de4007bbb8a5869cc3f193ae34b2fbd9e4b876c4 mason_extract_tar_bz2 diff --git a/scripts/libshp2/1.3.0/script.sh b/scripts/libshp2/1.3.0/script.sh index a3b2c9486..8bc4cbdc6 100755 --- a/scripts/libshp2/1.3.0/script.sh +++ b/scripts/libshp2/1.3.0/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libshp.a function mason_load_source { mason_download \ - http://download.osgeo.org/shapelib/shapelib-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/shapelib/shapelib-${MASON_VERSION}.tar.gz \ 4b3cc10fd5ac228d749ab0a19d485b475b7d5fb5 mason_extract_tar_gz diff --git a/scripts/libtiff/4.0.4beta/script.sh b/scripts/libtiff/4.0.4beta/script.sh index 9ef375b62..f42cb4b78 100755 --- a/scripts/libtiff/4.0.4beta/script.sh +++ b/scripts/libtiff/4.0.4beta/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libtiff-4.pc 
function mason_load_source { mason_download \ - http://download.osgeo.org/libtiff/tiff-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/libtiff/tiff-${MASON_VERSION}.tar.gz \ 7bbd91b09cef1a4c29d3cccb7e656ee32587e5ef mason_extract_tar_gz diff --git a/scripts/libtiff/4.0.6/script.sh b/scripts/libtiff/4.0.6/script.sh index 45f98b84b..b6142da0d 100755 --- a/scripts/libtiff/4.0.6/script.sh +++ b/scripts/libtiff/4.0.6/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libtiff-4.pc function mason_load_source { mason_download \ - http://download.osgeo.org/libtiff/tiff-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/libtiff/tiff-${MASON_VERSION}.tar.gz \ a6c275bb0a444f9b43f5cd3f15e0400599dc5ffc mason_extract_tar_gz diff --git a/scripts/libtiff/4.0.7/script.sh b/scripts/libtiff/4.0.7/script.sh index cf17b39d6..deb5b58b3 100755 --- a/scripts/libtiff/4.0.7/script.sh +++ b/scripts/libtiff/4.0.7/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libtiff-4.pc function mason_load_source { mason_download \ - http://download.osgeo.org/libtiff/tiff-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/libtiff/tiff-${MASON_VERSION}.tar.gz \ 3ef673aa786929fea2f997439e33473777465927 mason_extract_tar_gz diff --git a/scripts/libtiff/4.0.8/script.sh b/scripts/libtiff/4.0.8/script.sh index f43721014..638443dda 100755 --- a/scripts/libtiff/4.0.8/script.sh +++ b/scripts/libtiff/4.0.8/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libtiff-4.pc function mason_load_source { mason_download \ - http://download.osgeo.org/libtiff/tiff-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/libtiff/tiff-${MASON_VERSION}.tar.gz \ eb172e79e887fc3b9e4b3e9639c49182ffe563a0 mason_extract_tar_gz diff --git a/scripts/libzip/0.11.2/script.sh b/scripts/libzip/0.11.2/script.sh index c900b853e..f68b0ad7e 100755 --- a/scripts/libzip/0.11.2/script.sh +++ b/scripts/libzip/0.11.2/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libzip.pc function 
mason_load_source { mason_download \ - http://www.nih.at/libzip/libzip-0.11.2.tar.gz \ + https://www.nih.at/libzip/libzip-0.11.2.tar.gz \ 5e2407b231390e1cb8234541e89693ae57487170 mason_extract_tar_gz diff --git a/scripts/libzip/1.0.1/script.sh b/scripts/libzip/1.0.1/script.sh index a3a98afc9..baf9496c0 100755 --- a/scripts/libzip/1.0.1/script.sh +++ b/scripts/libzip/1.0.1/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libzip.pc function mason_load_source { mason_download \ - http://www.nih.at/libzip/libzip-${MASON_VERSION}.tar.gz \ + https://www.nih.at/libzip/libzip-${MASON_VERSION}.tar.gz \ b7761ee2ef581979df32f42637042f5663d766bf mason_extract_tar_gz diff --git a/scripts/libzip/1.1.3/script.sh b/scripts/libzip/1.1.3/script.sh index 1655ddd2c..8c15dfc98 100755 --- a/scripts/libzip/1.1.3/script.sh +++ b/scripts/libzip/1.1.3/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libzip.pc function mason_load_source { mason_download \ - http://www.nih.at/libzip/libzip-${MASON_VERSION}.tar.gz \ + https://libzip.org/download/libzip-${MASON_VERSION}.tar.gz \ 4bc18317d0607d5a24b618a6a5c1c229dade48e8 mason_extract_tar_gz diff --git a/scripts/lldb/10.0.0/.travis.yml b/scripts/lldb/10.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/lldb/10.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/lldb/10.0.0/script.sh b/scripts/lldb/10.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/lldb/10.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && 
pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. ${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/lldb/11.0.0/.travis.yml b/scripts/lldb/11.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/lldb/11.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/lldb/11.0.0/script.sh b/scripts/lldb/11.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/lldb/11.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/lldb/6.0.0/script.sh b/scripts/lldb/6.0.0/script.sh index 391259e3c..c21f18d8a 100755 --- a/scripts/lldb/6.0.0/script.sh +++ b/scripts/lldb/6.0.0/script.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + # dynamically determine the path to this package HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" diff --git a/scripts/lldb/6.0.1/.travis.yml b/scripts/lldb/6.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/lldb/6.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/lldb/6.0.1/script.sh b/scripts/lldb/6.0.1/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/lldb/6.0.1/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/lldb/7.0.0/script.sh b/scripts/lldb/7.0.0/script.sh index 391259e3c..c21f18d8a 100755 --- a/scripts/lldb/7.0.0/script.sh +++ b/scripts/lldb/7.0.0/script.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + # dynamically determine the path to this package HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" diff --git a/scripts/lldb/7.0.1/.travis.yml b/scripts/lldb/7.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/lldb/7.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/lldb/7.0.1/script.sh b/scripts/lldb/7.0.1/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/lldb/7.0.1/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/lldb/8.0.0/.travis.yml b/scripts/lldb/8.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/lldb/8.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/lldb/8.0.0/script.sh b/scripts/lldb/8.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/lldb/8.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/lldb/9.0.0/.travis.yml b/scripts/lldb/9.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/lldb/9.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/lldb/9.0.0/script.sh b/scripts/lldb/9.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/lldb/9.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/lldb/9.0.1/.travis.yml b/scripts/lldb/9.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/lldb/9.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/lldb/9.0.1/script.sh b/scripts/lldb/9.0.1/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/lldb/9.0.1/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/llnode/1.4.1/script.sh b/scripts/llnode/1.4.1/script.sh index 2c3da09db..b4d0cc934 100755 --- a/scripts/llnode/1.4.1/script.sh +++ b/scripts/llnode/1.4.1/script.sh @@ -26,7 +26,7 @@ function mason_compile { git clone --depth 1 https://chromium.googlesource.com/external/gyp.git tools/gyp # ../src/llv8.cc:256:43: error: expected ')' #snprintf(tmp, sizeof(tmp), " fn=0x%016" PRIx64, fn.raw()); - # need to define STDC macros since libc++ adheres to spec: http://en.cppreference.com/w/cpp/types/integer + # need to define STDC macros since libc++ adheres to spec: https://en.cppreference.com/w/cpp/types/integer export CXXFLAGS="-stdlib=libc++ ${CXXFLAGS} -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS" export LDFLAGS="-stdlib=libc++ ${LDFLAGS}" # per the llvm package, on linux we statically link libc++ for full portability diff --git a/scripts/llnode/1.7.1/.travis.yml b/scripts/llnode/1.7.1/.travis.yml new file mode 100644 index 000000000..8c71516f9 --- /dev/null +++ b/scripts/llnode/1.7.1/.travis.yml @@ -0,0 +1,13 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + compiler: clang + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/llnode/1.7.1/patch.diff b/scripts/llnode/1.7.1/patch.diff new file mode 100644 index 000000000..1c6099683 --- /dev/null +++ b/scripts/llnode/1.7.1/patch.diff @@ -0,0 +1,39 @@ +diff --git a/binding.gyp b/binding.gyp +index 78864d4..676bce5 100644 +--- a/binding.gyp ++++ b/binding.gyp +@@ -23,33 +23,8 @@ + # Necessary for node v4.x + "xcode_settings": { + "OTHER_CPLUSPLUSFLAGS" : [ "-std=c++11", "-stdlib=libc++" ], +- "OTHER_LDFLAGS": [ "-stdlib=libc++" ], ++ "OTHER_LDFLAGS": [ "-stdlib=libc++","-Wl,-undefined,dynamic_lookup" ], + }, +- +- 
"conditions": [ +- [ "lldb_lib_dir == ''", { +- "variables": { +- "mac_shared_frameworks": "/Applications/Xcode.app/Contents/SharedFrameworks", +- }, +- "xcode_settings": { +- "OTHER_LDFLAGS": [ +- "-F<(mac_shared_frameworks)", +- "-Wl,-rpath,<(mac_shared_frameworks)", +- "-framework LLDB", +- ], +- }, +- }, +- # lldb_lib_dir != "" +- { +- "xcode_settings": { +- "OTHER_LDFLAGS": [ +- "-Wl,-rpath,<(lldb_lib_dir)", +- "-L<(lldb_lib_dir)", +- "-l<(lldb_lib)", +- ], +- }, +- }], +- ], + }] + ] + }, diff --git a/scripts/llnode/1.7.1/script.sh b/scripts/llnode/1.7.1/script.sh new file mode 100755 index 000000000..b4056db63 --- /dev/null +++ b/scripts/llnode/1.7.1/script.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash + +MASON_NAME=llnode +MASON_VERSION=1.7.1 +MASON_LIB_FILE=lib/plugin.${MASON_DYNLIB_SUFFIX} + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/nodejs/llnode/archive/v${MASON_VERSION}.tar.gz \ + 1bc3ff2925770b42f3b93995e1d67a3f3b547d93 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_prepare_compile { + LLVM_VERSION=6.0.1 + ${MASON_DIR}/mason install llvm ${LLVM_VERSION} + LLVM_PATH=$(${MASON_DIR}/mason prefix llvm ${LLVM_VERSION}) + # needed for node-gyp + NODE_VERSION=6.14.3 + ${MASON_DIR}/mason install node ${NODE_VERSION} + export NODE_PATH=$(${MASON_DIR}/mason prefix node ${NODE_VERSION}) + echo `which node` + echo `which npm` +} + +function mason_compile { + # ../src/llv8.cc:256:43: error: expected ')' + #snprintf(tmp, sizeof(tmp), " fn=0x%016" PRIx64, fn.raw()); + # need to define STDC macros since libc++ adheres to spec: https://en.cppreference.com/w/cpp/types/integer + export CXXFLAGS="-stdlib=libc++ ${CXXFLAGS} -I${LLVM_PATH}/include -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS" + export LDFLAGS="-stdlib=libc++ ${LDFLAGS}" + export CXX="${LLVM_PATH}/bin/clang++" + patch -N -p1 < 
${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff + # per the llvm package, on linux we statically link libc++ for full portability + # while on osx we use the system libc++ + if [[ $(uname -s) == 'Linux' ]] && [[ -f ${LLVM_PATH}/lib/libc++.a ]]; then + export LDFLAGS="-Wl,--whole-archive ${LLVM_PATH}/lib/libc++.a ${LLVM_PATH}/lib/libc++abi.a ${LDFLAGS}" + fi + if [[ $(uname -s) == 'Darwin' ]] && [[ -f ${LLVM_PATH}/lib/libc++.a ]]; then + export LDFLAGS="${LLVM_PATH}/lib/libc++.a ${LLVM_PATH}/lib/libc++abi.a ${LDFLAGS}" + fi + echo '{' > config.gypi + echo "'lldb_header_dir':'${LLVM_PATH}/include'," >> config.gypi + echo "'lldb_lib_dir':'${LLVM_PATH}/lib'" >> config.gypi + echo '}' >> config.gypi + ${NODE_PATH}/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js configure --clang=1 -- -Dlldb_lib_dir=${LLVM_PATH}/lib + V=1 ${NODE_PATH}/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js build --clang=1 + ls build/Release/* + mkdir -p ${MASON_PREFIX}/lib + cp build/Release/plugin.* ${MASON_PREFIX}/lib/ +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/llvm-cov/10.0.0/.travis.yml b/scripts/llvm-cov/10.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/llvm-cov/10.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/llvm-cov/10.0.0/script.sh b/scripts/llvm-cov/10.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/llvm-cov/10.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( 
dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. ${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/llvm-cov/11.0.0/.travis.yml b/scripts/llvm-cov/11.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/llvm-cov/11.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/llvm-cov/11.0.0/script.sh b/scripts/llvm-cov/11.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/llvm-cov/11.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/llvm-cov/6.0.0/script.sh b/scripts/llvm-cov/6.0.0/script.sh index 391259e3c..c21f18d8a 100755 --- a/scripts/llvm-cov/6.0.0/script.sh +++ b/scripts/llvm-cov/6.0.0/script.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + # dynamically determine the path to this package HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" diff --git a/scripts/llvm-cov/6.0.1/.travis.yml b/scripts/llvm-cov/6.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/llvm-cov/6.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/llvm-cov/6.0.1/script.sh b/scripts/llvm-cov/6.0.1/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/llvm-cov/6.0.1/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/llvm-cov/7.0.0/script.sh b/scripts/llvm-cov/7.0.0/script.sh index 391259e3c..c21f18d8a 100755 --- a/scripts/llvm-cov/7.0.0/script.sh +++ b/scripts/llvm-cov/7.0.0/script.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + # dynamically determine the path to this package HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" diff --git a/scripts/llvm-cov/7.0.1/.travis.yml b/scripts/llvm-cov/7.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/llvm-cov/7.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/llvm-cov/7.0.1/script.sh b/scripts/llvm-cov/7.0.1/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/llvm-cov/7.0.1/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/llvm-cov/8.0.0/.travis.yml b/scripts/llvm-cov/8.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/llvm-cov/8.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/llvm-cov/8.0.0/script.sh b/scripts/llvm-cov/8.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/llvm-cov/8.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/llvm-cov/9.0.0/.travis.yml b/scripts/llvm-cov/9.0.0/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/llvm-cov/9.0.0/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/llvm-cov/9.0.0/script.sh b/scripts/llvm-cov/9.0.0/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/llvm-cov/9.0.0/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/llvm-cov/9.0.1/.travis.yml b/scripts/llvm-cov/9.0.1/.travis.yml new file mode 100644 index 000000000..d22a82784 --- /dev/null +++ b/scripts/llvm-cov/9.0.1/.travis.yml @@ -0,0 +1,11 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/llvm-cov/9.0.1/script.sh b/scripts/llvm-cov/9.0.1/script.sh new file mode 100755 index 000000000..c21f18d8a --- /dev/null +++ b/scripts/llvm-cov/9.0.1/script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" + +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +MASON_LIB_FILE=bin/${MASON_NAME} + +. 
${MASON_DIR}/mason.sh + +# inherit all functions from base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +mason_run "$@" diff --git a/scripts/llvm/10.0.0/.travis.yml b/scripts/llvm/10.0.0/.travis.yml new file mode 100644 index 000000000..cc9a85738 --- /dev/null +++ b/scripts/llvm/10.0.0/.travis.yml @@ -0,0 +1,4 @@ +language: generic + +script: +- echo "nothing to do since travis cannot compile something as large as llvm" diff --git a/scripts/llvm/10.0.0/README.md b/scripts/llvm/10.0.0/README.md new file mode 100644 index 000000000..425bf0d1e --- /dev/null +++ b/scripts/llvm/10.0.0/README.md @@ -0,0 +1 @@ +For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md diff --git a/scripts/llvm/10.0.0/libcxx.diff b/scripts/llvm/10.0.0/libcxx.diff new file mode 100644 index 000000000..348556360 --- /dev/null +++ b/scripts/llvm/10.0.0/libcxx.diff @@ -0,0 +1,51 @@ +diff --git a/src/experimental/filesystem/operations.cpp b/src/experimental/filesystem/operations.cpp +index 2bc28c21d..bd173893c 100644 +--- a/src/experimental/filesystem/operations.cpp ++++ b/src/experimental/filesystem/operations.cpp +@@ -21,7 +21,34 @@ + #include + #include + #include /* values for fchmodat */ +-#if !defined(UTIME_OMIT) ++ ++#if (__APPLE__) ++#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101300 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ >= 40000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __ENVIRONMENT_.*_VERSION_MIN_REQUIRED__ ++#else 
++// We can use the presence of UTIME_OMIT to detect platforms that provide ++// utimensat. ++#if defined(UTIME_OMIT) ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __APPLE__ ++ ++#if !defined(_LIBCXX_USE_UTIMENSAT) + #include // for ::utimes as used in __last_write_time + #endif + +@@ -560,9 +587,7 @@ void __last_write_time(const path& p, file_time_type new_time, + using namespace std::chrono; + std::error_code m_ec; + +- // We can use the presence of UTIME_OMIT to detect platforms that do not +- // provide utimensat. +-#if !defined(UTIME_OMIT) ++#if !defined(_LIBCXX_USE_UTIMENSAT) + // This implementation has a race condition between determining the + // last access time and attempting to set it to the same value using + // ::utimes diff --git a/scripts/llvm/10.0.0/script.sh b/scripts/llvm/10.0.0/script.sh new file mode 100755 index 000000000..980e4b461 --- /dev/null +++ b/scripts/llvm/10.0.0/script.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +# inherit all functions from llvm base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +function setup_release() { + : + # broken https://github.com/include-what-you-use/include-what-you-use/issues/592 + # get_llvm_project "https://github.com/include-what-you-use/include-what-you-use.git" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use "" 569a7ee + #get_llvm_project "https://github.com/include-what-you-use/include-what-you-use/archive/clang_${MAJOR_MINOR}.tar.gz" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use +} + +mason_run "$@" diff --git a/scripts/llvm/11.0.0/.travis.yml 
b/scripts/llvm/11.0.0/.travis.yml new file mode 100644 index 000000000..cc9a85738 --- /dev/null +++ b/scripts/llvm/11.0.0/.travis.yml @@ -0,0 +1,4 @@ +language: generic + +script: +- echo "nothing to do since travis cannot compile something as large as llvm" diff --git a/scripts/llvm/11.0.0/README.md b/scripts/llvm/11.0.0/README.md new file mode 100644 index 000000000..425bf0d1e --- /dev/null +++ b/scripts/llvm/11.0.0/README.md @@ -0,0 +1 @@ +For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md diff --git a/scripts/llvm/11.0.0/libcxx.diff b/scripts/llvm/11.0.0/libcxx.diff new file mode 100644 index 000000000..348556360 --- /dev/null +++ b/scripts/llvm/11.0.0/libcxx.diff @@ -0,0 +1,51 @@ +diff --git a/src/experimental/filesystem/operations.cpp b/src/experimental/filesystem/operations.cpp +index 2bc28c21d..bd173893c 100644 +--- a/src/experimental/filesystem/operations.cpp ++++ b/src/experimental/filesystem/operations.cpp +@@ -21,7 +21,34 @@ + #include + #include + #include /* values for fchmodat */ +-#if !defined(UTIME_OMIT) ++ ++#if (__APPLE__) ++#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101300 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ >= 40000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __ENVIRONMENT_.*_VERSION_MIN_REQUIRED__ ++#else ++// We can use the presence of UTIME_OMIT to detect platforms that provide ++// utimensat. 
++#if defined(UTIME_OMIT) ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __APPLE__ ++ ++#if !defined(_LIBCXX_USE_UTIMENSAT) + #include // for ::utimes as used in __last_write_time + #endif + +@@ -560,9 +587,7 @@ void __last_write_time(const path& p, file_time_type new_time, + using namespace std::chrono; + std::error_code m_ec; + +- // We can use the presence of UTIME_OMIT to detect platforms that do not +- // provide utimensat. +-#if !defined(UTIME_OMIT) ++#if !defined(_LIBCXX_USE_UTIMENSAT) + // This implementation has a race condition between determining the + // last access time and attempting to set it to the same value using + // ::utimes diff --git a/scripts/llvm/11.0.0/script.sh b/scripts/llvm/11.0.0/script.sh new file mode 100755 index 000000000..980e4b461 --- /dev/null +++ b/scripts/llvm/11.0.0/script.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +# inherit all functions from llvm base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +function setup_release() { + : + # broken https://github.com/include-what-you-use/include-what-you-use/issues/592 + # get_llvm_project "https://github.com/include-what-you-use/include-what-you-use.git" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use "" 569a7ee + #get_llvm_project "https://github.com/include-what-you-use/include-what-you-use/archive/clang_${MAJOR_MINOR}.tar.gz" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use +} + +mason_run "$@" diff --git a/scripts/llvm/6.0.0/README.md b/scripts/llvm/6.0.0/README.md index 6ce52d7fd..425bf0d1e 100644 --- a/scripts/llvm/6.0.0/README.md +++ 
b/scripts/llvm/6.0.0/README.md @@ -1,3 +1 @@ -### llvm v4.x - -Development package of llvm git head \ No newline at end of file +For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md diff --git a/scripts/llvm/6.0.0/libcxx.diff b/scripts/llvm/6.0.0/libcxx.diff new file mode 100644 index 000000000..348556360 --- /dev/null +++ b/scripts/llvm/6.0.0/libcxx.diff @@ -0,0 +1,51 @@ +diff --git a/src/experimental/filesystem/operations.cpp b/src/experimental/filesystem/operations.cpp +index 2bc28c21d..bd173893c 100644 +--- a/src/experimental/filesystem/operations.cpp ++++ b/src/experimental/filesystem/operations.cpp +@@ -21,7 +21,34 @@ + #include + #include + #include /* values for fchmodat */ +-#if !defined(UTIME_OMIT) ++ ++#if (__APPLE__) ++#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101300 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ >= 40000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __ENVIRONMENT_.*_VERSION_MIN_REQUIRED__ ++#else ++// We can use the presence of UTIME_OMIT to detect platforms that provide ++// utimensat. 
++#if defined(UTIME_OMIT) ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __APPLE__ ++ ++#if !defined(_LIBCXX_USE_UTIMENSAT) + #include // for ::utimes as used in __last_write_time + #endif + +@@ -560,9 +587,7 @@ void __last_write_time(const path& p, file_time_type new_time, + using namespace std::chrono; + std::error_code m_ec; + +- // We can use the presence of UTIME_OMIT to detect platforms that do not +- // provide utimensat. +-#if !defined(UTIME_OMIT) ++#if !defined(_LIBCXX_USE_UTIMENSAT) + // This implementation has a race condition between determining the + // last access time and attempting to set it to the same value using + // ::utimes diff --git a/scripts/llvm/6.0.0/script.sh b/scripts/llvm/6.0.0/script.sh index 5087ab6d1..b23b2c26f 100755 --- a/scripts/llvm/6.0.0/script.sh +++ b/scripts/llvm/6.0.0/script.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + # dynamically determine the path to this package HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" # dynamically take name of package from directory @@ -9,19 +11,18 @@ MASON_VERSION=$(basename $HERE) # inherit all functions from llvm base source ${HERE}/../../${MASON_NAME}/base/common.sh -function setup_base_tools() { - get_llvm_project "http://llvm.org/git/llvm.git" ${MASON_BUILD_PATH} - get_llvm_project "http://llvm.org/git/clang.git" ${MASON_BUILD_PATH}/tools/clang - get_llvm_project "http://llvm.org/git/compiler-rt.git" ${MASON_BUILD_PATH}/projects/compiler-rt - if [[ ${BUILD_AND_LINK_LIBCXX} == true ]]; then - get_llvm_project "http://llvm.org/git/libcxx.git" ${MASON_BUILD_PATH}/projects/libcxx - get_llvm_project "http://llvm.org/git/libcxxabi.git" ${MASON_BUILD_PATH}/projects/libcxxabi - get_llvm_project "http://llvm.org/git/libunwind.git" ${MASON_BUILD_PATH}/projects/libunwind - fi - get_llvm_project "http://llvm.org/git/lld.git" ${MASON_BUILD_PATH}/tools/lld - get_llvm_project 
"http://llvm.org/git/clang-tools-extra.git" ${MASON_BUILD_PATH}/tools/clang/tools/extra - get_llvm_project "http://llvm.org/git/lldb.git" ${MASON_BUILD_PATH}/tools/lldb - get_llvm_project "https://github.com/include-what-you-use/include-what-you-use.git" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use +# broken with: +# ../tools/clang/tools/include-what-you-use/iwyu_ast_util.cc:455:3: error: use of undeclared identifier 'printTemplateArgumentList' +# function setup_release() { +# get_llvm_project "https://github.com/include-what-you-use/include-what-you-use.git" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use "" 5788b34c2e22fa97630c4a5b1153d828698f9ac1 +# } + +function setup_release() { + #get_llvm_project "https://github.com/include-what-you-use/include-what-you-use.git" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use "" f1ec249 + # FIX 6.0.0 specific libcxx bug: https://github.com/llvm-mirror/libcxx/commit/68b20ca4d9c4bee2c2ad5a9240599b3e4b78d0ba + # This will need to be removed in upcoming releases + (cd ${MASON_BUILD_PATH}/projects/libcxx && + patch -N -p1 < ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/libcxx.diff) } mason_run "$@" diff --git a/scripts/llvm/6.0.1/.travis.yml b/scripts/llvm/6.0.1/.travis.yml new file mode 100644 index 000000000..cc9a85738 --- /dev/null +++ b/scripts/llvm/6.0.1/.travis.yml @@ -0,0 +1,4 @@ +language: generic + +script: +- echo "nothing to do since travis cannot compile something as large as llvm" diff --git a/scripts/llvm/6.0.1/README.md b/scripts/llvm/6.0.1/README.md new file mode 100644 index 000000000..425bf0d1e --- /dev/null +++ b/scripts/llvm/6.0.1/README.md @@ -0,0 +1 @@ +For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md diff --git a/scripts/llvm/6.0.1/libcxx.diff b/scripts/llvm/6.0.1/libcxx.diff new file mode 100644 index 000000000..348556360 --- /dev/null +++ b/scripts/llvm/6.0.1/libcxx.diff @@ -0,0 +1,51 @@ +diff --git 
a/src/experimental/filesystem/operations.cpp b/src/experimental/filesystem/operations.cpp +index 2bc28c21d..bd173893c 100644 +--- a/src/experimental/filesystem/operations.cpp ++++ b/src/experimental/filesystem/operations.cpp +@@ -21,7 +21,34 @@ + #include + #include + #include /* values for fchmodat */ +-#if !defined(UTIME_OMIT) ++ ++#if (__APPLE__) ++#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101300 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ >= 40000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __ENVIRONMENT_.*_VERSION_MIN_REQUIRED__ ++#else ++// We can use the presence of UTIME_OMIT to detect platforms that provide ++// utimensat. ++#if defined(UTIME_OMIT) ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __APPLE__ ++ ++#if !defined(_LIBCXX_USE_UTIMENSAT) + #include // for ::utimes as used in __last_write_time + #endif + +@@ -560,9 +587,7 @@ void __last_write_time(const path& p, file_time_type new_time, + using namespace std::chrono; + std::error_code m_ec; + +- // We can use the presence of UTIME_OMIT to detect platforms that do not +- // provide utimensat. 
+-#if !defined(UTIME_OMIT) ++#if !defined(_LIBCXX_USE_UTIMENSAT) + // This implementation has a race condition between determining the + // last access time and attempting to set it to the same value using + // ::utimes diff --git a/scripts/llvm/6.0.1/script.sh b/scripts/llvm/6.0.1/script.sh new file mode 100755 index 000000000..e765acea6 --- /dev/null +++ b/scripts/llvm/6.0.1/script.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +# inherit all functions from llvm base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +function setup_release() { + get_llvm_project "https://github.com/include-what-you-use/include-what-you-use/archive/clang_${MAJOR_MINOR}.tar.gz" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use + # FIX 6.0.0 specific libcxx bug: https://github.com/llvm-mirror/libcxx/commit/68b20ca4d9c4bee2c2ad5a9240599b3e4b78d0ba + # This will likely need to be removed in upcoming releases + (cd ${MASON_BUILD_PATH}/projects/libcxx && + patch -N -p1 < ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/libcxx.diff) +} + +mason_run "$@" diff --git a/scripts/llvm/7.0.0/README.md b/scripts/llvm/7.0.0/README.md index 6f5480a6f..425bf0d1e 100644 --- a/scripts/llvm/7.0.0/README.md +++ b/scripts/llvm/7.0.0/README.md @@ -1,3 +1 @@ -### llvm v7.x - -Development package of llvm git head \ No newline at end of file +For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md diff --git a/scripts/llvm/7.0.0/libcxx.diff b/scripts/llvm/7.0.0/libcxx.diff new file mode 100644 index 000000000..348556360 --- /dev/null +++ 
b/scripts/llvm/7.0.0/libcxx.diff @@ -0,0 +1,51 @@ +diff --git a/src/experimental/filesystem/operations.cpp b/src/experimental/filesystem/operations.cpp +index 2bc28c21d..bd173893c 100644 +--- a/src/experimental/filesystem/operations.cpp ++++ b/src/experimental/filesystem/operations.cpp +@@ -21,7 +21,34 @@ + #include + #include + #include /* values for fchmodat */ +-#if !defined(UTIME_OMIT) ++ ++#if (__APPLE__) ++#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101300 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ >= 40000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __ENVIRONMENT_.*_VERSION_MIN_REQUIRED__ ++#else ++// We can use the presence of UTIME_OMIT to detect platforms that provide ++// utimensat. ++#if defined(UTIME_OMIT) ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __APPLE__ ++ ++#if !defined(_LIBCXX_USE_UTIMENSAT) + #include // for ::utimes as used in __last_write_time + #endif + +@@ -560,9 +587,7 @@ void __last_write_time(const path& p, file_time_type new_time, + using namespace std::chrono; + std::error_code m_ec; + +- // We can use the presence of UTIME_OMIT to detect platforms that do not +- // provide utimensat. 
+-#if !defined(UTIME_OMIT) ++#if !defined(_LIBCXX_USE_UTIMENSAT) + // This implementation has a race condition between determining the + // last access time and attempting to set it to the same value using + // ::utimes diff --git a/scripts/llvm/7.0.0/script.sh b/scripts/llvm/7.0.0/script.sh index 0c3081095..980e4b461 100755 --- a/scripts/llvm/7.0.0/script.sh +++ b/scripts/llvm/7.0.0/script.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + # dynamically determine the path to this package HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" # dynamically take name of package from directory @@ -9,21 +11,11 @@ MASON_VERSION=$(basename $HERE) # inherit all functions from llvm base source ${HERE}/../../${MASON_NAME}/base/common.sh -function setup_base_tools() { - get_llvm_project "http://llvm.org/git/llvm.git" ${MASON_BUILD_PATH} - get_llvm_project "http://llvm.org/git/clang.git" ${MASON_BUILD_PATH}/tools/clang - get_llvm_project "http://llvm.org/git/compiler-rt.git" ${MASON_BUILD_PATH}/projects/compiler-rt - if [[ ${BUILD_AND_LINK_LIBCXX} == true ]]; then - get_llvm_project "http://llvm.org/git/libcxx.git" ${MASON_BUILD_PATH}/projects/libcxx - get_llvm_project "http://llvm.org/git/libcxxabi.git" ${MASON_BUILD_PATH}/projects/libcxxabi - get_llvm_project "http://llvm.org/git/libunwind.git" ${MASON_BUILD_PATH}/projects/libunwind - fi - get_llvm_project "http://llvm.org/git/openmp.git" ${MASON_BUILD_PATH}/projects/openmp - get_llvm_project "http://llvm.org/git/lld.git" ${MASON_BUILD_PATH}/tools/lld - get_llvm_project "http://llvm.org/git/clang-tools-extra.git" ${MASON_BUILD_PATH}/tools/clang/tools/extra - get_llvm_project "http://llvm.org/git/lldb.git" ${MASON_BUILD_PATH}/tools/lldb - get_llvm_project "http://llvm.org/git/polly.git" ${MASON_BUILD_PATH}/tools/polly - get_llvm_project "https://github.com/include-what-you-use/include-what-you-use.git" 
${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use +function setup_release() { + : + # broken https://github.com/include-what-you-use/include-what-you-use/issues/592 + # get_llvm_project "https://github.com/include-what-you-use/include-what-you-use.git" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use "" 569a7ee + #get_llvm_project "https://github.com/include-what-you-use/include-what-you-use/archive/clang_${MAJOR_MINOR}.tar.gz" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use } mason_run "$@" diff --git a/scripts/llvm/7.0.1/.travis.yml b/scripts/llvm/7.0.1/.travis.yml new file mode 100644 index 000000000..cc9a85738 --- /dev/null +++ b/scripts/llvm/7.0.1/.travis.yml @@ -0,0 +1,4 @@ +language: generic + +script: +- echo "nothing to do since travis cannot compile something as large as llvm" diff --git a/scripts/llvm/7.0.1/README.md b/scripts/llvm/7.0.1/README.md new file mode 100644 index 000000000..425bf0d1e --- /dev/null +++ b/scripts/llvm/7.0.1/README.md @@ -0,0 +1 @@ +For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md diff --git a/scripts/llvm/7.0.1/libcxx.diff b/scripts/llvm/7.0.1/libcxx.diff new file mode 100644 index 000000000..348556360 --- /dev/null +++ b/scripts/llvm/7.0.1/libcxx.diff @@ -0,0 +1,51 @@ +diff --git a/src/experimental/filesystem/operations.cpp b/src/experimental/filesystem/operations.cpp +index 2bc28c21d..bd173893c 100644 +--- a/src/experimental/filesystem/operations.cpp ++++ b/src/experimental/filesystem/operations.cpp +@@ -21,7 +21,34 @@ + #include + #include + #include /* values for fchmodat */ +-#if !defined(UTIME_OMIT) ++ ++#if (__APPLE__) ++#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101300 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT 
++#endif ++#elif defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ >= 40000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __ENVIRONMENT_.*_VERSION_MIN_REQUIRED__ ++#else ++// We can use the presence of UTIME_OMIT to detect platforms that provide ++// utimensat. ++#if defined(UTIME_OMIT) ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __APPLE__ ++ ++#if !defined(_LIBCXX_USE_UTIMENSAT) + #include // for ::utimes as used in __last_write_time + #endif + +@@ -560,9 +587,7 @@ void __last_write_time(const path& p, file_time_type new_time, + using namespace std::chrono; + std::error_code m_ec; + +- // We can use the presence of UTIME_OMIT to detect platforms that do not +- // provide utimensat. +-#if !defined(UTIME_OMIT) ++#if !defined(_LIBCXX_USE_UTIMENSAT) + // This implementation has a race condition between determining the + // last access time and attempting to set it to the same value using + // ::utimes diff --git a/scripts/llvm/7.0.1/script.sh b/scripts/llvm/7.0.1/script.sh new file mode 100755 index 000000000..980e4b461 --- /dev/null +++ b/scripts/llvm/7.0.1/script.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +# inherit all functions from llvm base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +function setup_release() { + : + # broken https://github.com/include-what-you-use/include-what-you-use/issues/592 + # get_llvm_project 
"https://github.com/include-what-you-use/include-what-you-use.git" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use "" 569a7ee + #get_llvm_project "https://github.com/include-what-you-use/include-what-you-use/archive/clang_${MAJOR_MINOR}.tar.gz" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use +} + +mason_run "$@" diff --git a/scripts/llvm/8.0.0/.travis.yml b/scripts/llvm/8.0.0/.travis.yml new file mode 100644 index 000000000..cc9a85738 --- /dev/null +++ b/scripts/llvm/8.0.0/.travis.yml @@ -0,0 +1,4 @@ +language: generic + +script: +- echo "nothing to do since travis cannot compile something as large as llvm" diff --git a/scripts/llvm/8.0.0/README.md b/scripts/llvm/8.0.0/README.md new file mode 100644 index 000000000..425bf0d1e --- /dev/null +++ b/scripts/llvm/8.0.0/README.md @@ -0,0 +1 @@ +For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md diff --git a/scripts/llvm/8.0.0/libcxx.diff b/scripts/llvm/8.0.0/libcxx.diff new file mode 100644 index 000000000..348556360 --- /dev/null +++ b/scripts/llvm/8.0.0/libcxx.diff @@ -0,0 +1,51 @@ +diff --git a/src/experimental/filesystem/operations.cpp b/src/experimental/filesystem/operations.cpp +index 2bc28c21d..bd173893c 100644 +--- a/src/experimental/filesystem/operations.cpp ++++ b/src/experimental/filesystem/operations.cpp +@@ -21,7 +21,34 @@ + #include + #include + #include /* values for fchmodat */ +-#if !defined(UTIME_OMIT) ++ ++#if (__APPLE__) ++#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101300 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif 
defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ >= 40000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __ENVIRONMENT_.*_VERSION_MIN_REQUIRED__ ++#else ++// We can use the presence of UTIME_OMIT to detect platforms that provide ++// utimensat. ++#if defined(UTIME_OMIT) ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __APPLE__ ++ ++#if !defined(_LIBCXX_USE_UTIMENSAT) + #include // for ::utimes as used in __last_write_time + #endif + +@@ -560,9 +587,7 @@ void __last_write_time(const path& p, file_time_type new_time, + using namespace std::chrono; + std::error_code m_ec; + +- // We can use the presence of UTIME_OMIT to detect platforms that do not +- // provide utimensat. +-#if !defined(UTIME_OMIT) ++#if !defined(_LIBCXX_USE_UTIMENSAT) + // This implementation has a race condition between determining the + // last access time and attempting to set it to the same value using + // ::utimes diff --git a/scripts/llvm/8.0.0/script.sh b/scripts/llvm/8.0.0/script.sh new file mode 100755 index 000000000..980e4b461 --- /dev/null +++ b/scripts/llvm/8.0.0/script.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +# inherit all functions from llvm base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +function setup_release() { + : + # broken https://github.com/include-what-you-use/include-what-you-use/issues/592 + # get_llvm_project "https://github.com/include-what-you-use/include-what-you-use.git" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use "" 569a7ee + #get_llvm_project 
"https://github.com/include-what-you-use/include-what-you-use/archive/clang_${MAJOR_MINOR}.tar.gz" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use +} + +mason_run "$@" diff --git a/scripts/llvm/9.0.0/.travis.yml b/scripts/llvm/9.0.0/.travis.yml new file mode 100644 index 000000000..cc9a85738 --- /dev/null +++ b/scripts/llvm/9.0.0/.travis.yml @@ -0,0 +1,4 @@ +language: generic + +script: +- echo "nothing to do since travis cannot compile something as large as llvm" diff --git a/scripts/llvm/9.0.0/README.md b/scripts/llvm/9.0.0/README.md new file mode 100644 index 000000000..425bf0d1e --- /dev/null +++ b/scripts/llvm/9.0.0/README.md @@ -0,0 +1 @@ +For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md diff --git a/scripts/llvm/9.0.0/libcxx.diff b/scripts/llvm/9.0.0/libcxx.diff new file mode 100644 index 000000000..348556360 --- /dev/null +++ b/scripts/llvm/9.0.0/libcxx.diff @@ -0,0 +1,51 @@ +diff --git a/src/experimental/filesystem/operations.cpp b/src/experimental/filesystem/operations.cpp +index 2bc28c21d..bd173893c 100644 +--- a/src/experimental/filesystem/operations.cpp ++++ b/src/experimental/filesystem/operations.cpp +@@ -21,7 +21,34 @@ + #include + #include + #include /* values for fchmodat */ +-#if !defined(UTIME_OMIT) ++ ++#if (__APPLE__) ++#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101300 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ >= 40000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // 
__ENVIRONMENT_.*_VERSION_MIN_REQUIRED__ ++#else ++// We can use the presence of UTIME_OMIT to detect platforms that provide ++// utimensat. ++#if defined(UTIME_OMIT) ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __APPLE__ ++ ++#if !defined(_LIBCXX_USE_UTIMENSAT) + #include // for ::utimes as used in __last_write_time + #endif + +@@ -560,9 +587,7 @@ void __last_write_time(const path& p, file_time_type new_time, + using namespace std::chrono; + std::error_code m_ec; + +- // We can use the presence of UTIME_OMIT to detect platforms that do not +- // provide utimensat. +-#if !defined(UTIME_OMIT) ++#if !defined(_LIBCXX_USE_UTIMENSAT) + // This implementation has a race condition between determining the + // last access time and attempting to set it to the same value using + // ::utimes diff --git a/scripts/llvm/9.0.0/script.sh b/scripts/llvm/9.0.0/script.sh new file mode 100755 index 000000000..980e4b461 --- /dev/null +++ b/scripts/llvm/9.0.0/script.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +# inherit all functions from llvm base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +function setup_release() { + : + # broken https://github.com/include-what-you-use/include-what-you-use/issues/592 + # get_llvm_project "https://github.com/include-what-you-use/include-what-you-use.git" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use "" 569a7ee + #get_llvm_project "https://github.com/include-what-you-use/include-what-you-use/archive/clang_${MAJOR_MINOR}.tar.gz" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use +} + +mason_run "$@" diff --git 
a/scripts/llvm/9.0.1/.travis.yml b/scripts/llvm/9.0.1/.travis.yml new file mode 100644 index 000000000..cc9a85738 --- /dev/null +++ b/scripts/llvm/9.0.1/.travis.yml @@ -0,0 +1,4 @@ +language: generic + +script: +- echo "nothing to do since travis cannot compile something as large as llvm" diff --git a/scripts/llvm/9.0.1/README.md b/scripts/llvm/9.0.1/README.md new file mode 100644 index 000000000..425bf0d1e --- /dev/null +++ b/scripts/llvm/9.0.1/README.md @@ -0,0 +1 @@ +For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md diff --git a/scripts/llvm/9.0.1/libcxx.diff b/scripts/llvm/9.0.1/libcxx.diff new file mode 100644 index 000000000..348556360 --- /dev/null +++ b/scripts/llvm/9.0.1/libcxx.diff @@ -0,0 +1,51 @@ +diff --git a/src/experimental/filesystem/operations.cpp b/src/experimental/filesystem/operations.cpp +index 2bc28c21d..bd173893c 100644 +--- a/src/experimental/filesystem/operations.cpp ++++ b/src/experimental/filesystem/operations.cpp +@@ -21,7 +21,34 @@ + #include + #include + #include /* values for fchmodat */ +-#if !defined(UTIME_OMIT) ++ ++#if (__APPLE__) ++#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101300 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ >= 110000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#elif defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) ++#if __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ >= 40000 ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __ENVIRONMENT_.*_VERSION_MIN_REQUIRED__ ++#else ++// We can use the presence of UTIME_OMIT to detect platforms that provide ++// utimensat. 
++#if defined(UTIME_OMIT) ++#define _LIBCXX_USE_UTIMENSAT ++#endif ++#endif // __APPLE__ ++ ++#if !defined(_LIBCXX_USE_UTIMENSAT) + #include // for ::utimes as used in __last_write_time + #endif + +@@ -560,9 +587,7 @@ void __last_write_time(const path& p, file_time_type new_time, + using namespace std::chrono; + std::error_code m_ec; + +- // We can use the presence of UTIME_OMIT to detect platforms that do not +- // provide utimensat. +-#if !defined(UTIME_OMIT) ++#if !defined(_LIBCXX_USE_UTIMENSAT) + // This implementation has a race condition between determining the + // last access time and attempting to set it to the same value using + // ::utimes diff --git a/scripts/llvm/9.0.1/script.sh b/scripts/llvm/9.0.1/script.sh new file mode 100755 index 000000000..980e4b461 --- /dev/null +++ b/scripts/llvm/9.0.1/script.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# For context on this file see https://github.com/mapbox/mason/blob/master/scripts/llvm/base/README.md + +# dynamically determine the path to this package +HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )" +# dynamically take name of package from directory +MASON_NAME=$(basename $(dirname $HERE)) +# dynamically take the version of the package from directory +MASON_VERSION=$(basename $HERE) +# inherit all functions from llvm base +source ${HERE}/../../${MASON_NAME}/base/common.sh + +function setup_release() { + : + # broken https://github.com/include-what-you-use/include-what-you-use/issues/592 + # get_llvm_project "https://github.com/include-what-you-use/include-what-you-use.git" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use "" 569a7ee + #get_llvm_project "https://github.com/include-what-you-use/include-what-you-use/archive/clang_${MAJOR_MINOR}.tar.gz" ${MASON_BUILD_PATH}/tools/clang/tools/include-what-you-use +} + +mason_run "$@" diff --git a/scripts/llvm/base/README.md b/scripts/llvm/base/README.md index 3b25630e6..21a87dad6 100644 --- a/scripts/llvm/base/README.md +++ 
b/scripts/llvm/base/README.md @@ -6,6 +6,8 @@ This readme documents: - Which sub-packages depend on the llvm package - How the llvm packages are built - How to create a new llvm package + sub-packages + - How to create a new _dev_ llvm package + sub-packages + - How to create a release package from a dev package (+ sub-packages) - How to use the binary packages ## What is llvm? @@ -211,6 +213,47 @@ MASON_PLATFORM=linux ./utils/llvm.sh publish 4.0.2 Note: `MASON_PLATFORM=linux` is only needed if your host is OS X. +#### Step 7: Test and Merge + +Once you publish, you should check the PR you created earlier to see if CI tests pass and run any other tests necessary to check your new package. Once tests have passed, merge your PR into master. + +You're done! + +## How to create a new dev llvm package + sub-packages + +#### Step 1: Create a mason branch + +`git checkout -b llvm-dev` + +#### Step 2: Create the new package + +Since a version number doesn't exist until LLVM makes a release, you should pick a version number that is one digit higher than the lastest release, e.g. if the latest release is 5.0.1, you would pick 6.0.0. Then create a new llvm package and sub-packages: + +``` +./utils/llvm.sh create 6.0.0 5.0.1 +``` + +#### Step 3: Override `setup_base_tools` + +- Edit the `script.sh` inside the directory of the new package you just created, e.g. from the example above `./scripts/llvm/6.0.0/script.sh` +- Override the `setup_base_tools` function with something like this https://github.com/mapbox/mason/blob/libzip-1.5.1/scripts/llvm/7.0.0/script.sh#L12-L27. This is where you tell mason to grab LLVM directly from http://llvm.org/git/llvm.git. Note: You can also specify a gitsha with `get_llvm_project` and this is currently being considered to become the recommended way of getting a dev version of LLVM since it is reproducible and easier to debug later. 
+ +#### Step 4: Follow Steps 5 and 7 above in the publishing a new package section + +Following steps 5 and 6 above cover: + +- Pushing your new package to github +- Creating a PR +- Building the new package +- Publishing it +- Merging your PR once CI tests pass + +Note: When building your package, e.g. `./mason build llvm 6.0.0`, mason will use the URLS you provided in the `setup_base_tools` override. + +## How to create a release package from a dev package (+ sub-packages) + +Currently this is a WIP, and making this easier to achieve is currently an issue with a documented work-around here: https://github.com/mapbox/mason/issues/578#issuecomment-383735380 + ## How to use the binary packages The binary packages will work on: diff --git a/scripts/llvm/base/common.sh b/scripts/llvm/base/common.sh index 916d0c9e6..3b53c318a 100755 --- a/scripts/llvm/base/common.sh +++ b/scripts/llvm/base/common.sh @@ -12,7 +12,7 @@ if [[ $(uname -s) == 'Darwin' ]]; then export BUILD_AND_LINK_LIBCXX=true # not installing libcxx avoids this kind of problem with include-what-you-use - export INSTALL_LIBCXX=false + export INSTALL_LIBCXX=true # because iwyu hardcodes at https://github.com/include-what-you-use/include-what-you-use/blob/da5c9b17fec571e6b2bbca29145463d7eaa3582e/iwyu_driver.cc#L219 : ' /Library/Developer/CommandLineTools/usr/include/c++/v1/cstdlib:167:44: error: declaration conflicts with target of using declaration already in scope @@ -84,19 +84,19 @@ function get_llvm_project() { } function setup_base_tools() { - get_llvm_project "http://llvm.org/releases/${MASON_BASE_VERSION}/llvm-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/ - get_llvm_project "http://llvm.org/releases/${MASON_BASE_VERSION}/cfe-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/tools/clang - get_llvm_project "http://llvm.org/releases/${MASON_BASE_VERSION}/compiler-rt-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/projects/compiler-rt + get_llvm_project 
"https://github.com/llvm/llvm-project/releases/download/llvmorg-${MASON_BASE_VERSION}/llvm-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/ + get_llvm_project "https://github.com/llvm/llvm-project/releases/download/llvmorg-${MASON_BASE_VERSION}/clang-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/tools/clang + get_llvm_project "https://github.com/llvm/llvm-project/releases/download/llvmorg-${MASON_BASE_VERSION}/compiler-rt-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/projects/compiler-rt if [[ ${BUILD_AND_LINK_LIBCXX} == true ]]; then - get_llvm_project "http://llvm.org/releases/${MASON_BASE_VERSION}/libcxx-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/projects/libcxx - get_llvm_project "http://llvm.org/releases/${MASON_BASE_VERSION}/libcxxabi-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/projects/libcxxabi - get_llvm_project "http://llvm.org/releases/${MASON_BASE_VERSION}/libunwind-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/projects/libunwind + get_llvm_project "https://github.com/llvm/llvm-project/releases/download/llvmorg-${MASON_BASE_VERSION}/libcxx-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/projects/libcxx + get_llvm_project "https://github.com/llvm/llvm-project/releases/download/llvmorg-${MASON_BASE_VERSION}/libcxxabi-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/projects/libcxxabi + get_llvm_project "https://github.com/llvm/llvm-project/releases/download/llvmorg-${MASON_BASE_VERSION}/libunwind-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/projects/libunwind fi - get_llvm_project "http://llvm.org/releases/${MASON_BASE_VERSION}/openmp-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/projects/openmp - get_llvm_project "http://llvm.org/releases/${MASON_BASE_VERSION}/lld-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/tools/lld - get_llvm_project "http://llvm.org/releases/${MASON_BASE_VERSION}/clang-tools-extra-${MASON_BASE_VERSION}.src.tar.xz" 
${MASON_BUILD_PATH}/tools/clang/tools/extra - get_llvm_project "http://llvm.org/releases/${MASON_BASE_VERSION}/lldb-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/tools/lldb - get_llvm_project "http://llvm.org/releases/${MASON_BASE_VERSION}/polly-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/tools/polly + get_llvm_project "https://github.com/llvm/llvm-project/releases/download/llvmorg-${MASON_BASE_VERSION}/openmp-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/projects/openmp + get_llvm_project "https://github.com/llvm/llvm-project/releases/download/llvmorg-${MASON_BASE_VERSION}/lld-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/tools/lld + get_llvm_project "https://github.com/llvm/llvm-project/releases/download/llvmorg-${MASON_BASE_VERSION}/clang-tools-extra-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/tools/clang/tools/extra + get_llvm_project "https://github.com/llvm/llvm-project/releases/download/llvmorg-${MASON_BASE_VERSION}/lldb-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/tools/lldb + get_llvm_project "https://github.com/llvm/llvm-project/releases/download/llvmorg-${MASON_BASE_VERSION}/polly-${MASON_BASE_VERSION}.src.tar.xz" ${MASON_BUILD_PATH}/tools/polly # The include-what-you-use project often lags behind llvm releases, causing compile problems when you try to build it within llvm (and I don't know how feasible it is to build separately) # Hence this is disabled by default and must be either enabled here or added to a `setup_release` function per package version # pulls from a tagged version: @@ -126,12 +126,12 @@ function mason_load_source { } function mason_prepare_compile { - CCACHE_VERSION=3.3.4 - CMAKE_VERSION=3.8.2 - NINJA_VERSION=1.7.2 - CLANG_VERSION=5.0.0 + CCACHE_VERSION=4.0 + CMAKE_VERSION=3.18.1 + NINJA_VERSION=1.10.1 + CLANG_VERSION=9.0.1 LIBEDIT_VERSION=3.1 - BINUTILS_VERSION=2.30 + BINUTILS_VERSION=2.35 NCURSES_VERSION=6.1 ${MASON_DIR}/mason install clang++ ${CLANG_VERSION} @@ -156,22 +156,6 @@ 
function mason_prepare_compile { } function mason_compile { - if [[ $(uname -s) == 'Darwin' ]]; then - # ensure codesigning is working before starting - # this logic borrowed from homebrew llvm.rb formula - TMPDIR=$(mktemp -d) - (cd $TMPDIR && \ - cp /usr/bin/false llvm_check && \ - RESULT=0 && - /usr/bin/codesign -f -s lldb_codesign --dryrun llvm_check || RESULT=$? && - if [[ ${RESULT} != 0 ]]; then - echo "lldb_codesign identity must be available to build with LLDB." - echo "See: https://llvm.org/svn/llvm-project/lldb/trunk/docs/code-signing.txt" - exit 1 - fi - ) - fi - export CXX="${CUSTOM_CXX:-${MASON_CLANG}/bin/clang++}" export CC="${CUSTOM_CC:-${MASON_CLANG}/bin/clang}" echo "using CXX=${CXX}" @@ -187,12 +171,9 @@ function mason_compile { CMAKE_EXTRA_ARGS="" - if [[ ${MAJOR_MINOR} == "3.8" ]]; then - # workaround https://llvm.org/bugs/show_bug.cgi?id=25565 - perl -i -p -e "s/set\(codegen_deps intrinsics_gen\)/set\(codegen_deps intrinsics_gen attributes_inc\)/g;" lib/CodeGen/CMakeLists.txt - - # note: LIBCXX_ENABLE_STATIC_ABI_LIBRARY=ON is only needed with llvm < 3.9.0 to avoid libcxx(abi) build breaking when only a static libc++ exists - CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DLIBCXX_ENABLE_STATIC_ABI_LIBRARY=ON" + if [[ $(uname -s) == 'Darwin' ]]; then + # Disable building the debugserver on OSX and use the system one with llvm > 8 + CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DLLDB_USE_SYSTEM_DEBUGSERVER=ON" fi if [[ -d tools/clang/tools/include-what-you-use ]]; then @@ -201,6 +182,7 @@ function mason_compile { if [[ $(uname -s) == 'Darwin' ]]; then : ' + https://andreasfertig.blog/2021/02/clang-and-gcc-on-macos-catalina-finding-the-include-paths/ Note: C_INCLUDE_DIRS and DEFAULT_SYSROOT are critical options to understand to ensure C and C++ headers are predictably found. 
The way things work in clang++ on OS X (inside http://clang.llvm.org/doxygen/InitHeaderSearch_8cpp.html) is: @@ -208,36 +190,35 @@ function mason_compile { - The `:` separated `C_INCLUDE_DIRS` are added to the include paths - If `C_INCLUDE_DIRS` is present `InitHeaderSearch::AddDefaultCIncludePaths` returns early - Without that early return `/usr/include` would be added by default on OS X - - If `-isysroot` is passed then absolute `C_INCLUDE_DIRS` are appended to the sysroot - - So if sysroot=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/ and - C_INCLUDE_DIRS=/usr/include the actual path searched would be: - /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include - - Relative `C_INCLUDE_DIRS` seem pointless because they are not appended to the sysroot and so will not be portable + - Relative `C_INCLUDE_DIRS` are appended to the sysroot: + - For example this input: + DEFAULT_SYSROOT=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk + C_INCLUDE_DIRS=usr/include + - Would give this output: + /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include + - Absolute `C_INCLUDE_DIRS` are added to the header search paths without prepending the sysroot and therefore are not as portable. 
- clang++ finds C++ headers relative to itself at https://github.com/llvm-mirror/clang/blob/master/lib/Frontend/InitHeaderSearch.cpp#L469-L470 - - So, given on OS X we want to use the XCode/Apple provided libc++ and c++ headers we symlink the relative location to /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++ + - So, if the the XCode/Apple provided libc++ and c++ headers are desired then symlinking is used below to provide the relative location to /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++ - The alternative would be to symlink to the command line tools location (/Library/Developer/CommandLineTools/usr/include/c++/v1/) Another viable sysroot would be the command line tools at /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk - Generally each SDK/Platform version has its own C headers inside SDK_PATH/usr/include while all platforms share the C++ headers which - are at /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1/ + Generally each SDK/Platform version has its own C headers inside ${SDK_PATH}/usr/include while all platforms share the C++ headers which are at /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1/ NOTE: show search paths with: `clang -x c -v -E /dev/null` || `cpp -v` && `clang -Xlinker -v` ' - CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DC_INCLUDE_DIRS=/usr/include" + # https://reviews.llvm.org/D69221 + CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DC_INCLUDE_DIRS=/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include" # setting the default sysroot to an explicit SDK avoids clang++ adding `/usr/local/include` to the paths by default at https://github.com/llvm-mirror/clang/blob/91d69c3c9c62946245a0fe6526d5ec226dfe7408/lib/Frontend/InitHeaderSearch.cpp#L226 # because that value will be appended to the sysroot, not exist, and then get thrown out. 
If the sysroot were / then it would be added - CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DDEFAULT_SYSROOT=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk" + # This sysroot value was taken from `xcrun --show-sdk-path` + CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DDEFAULT_SYSROOT=/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk" CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DCLANG_DEFAULT_CXX_STDLIB=libc++" - CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DCMAKE_OSX_DEPLOYMENT_TARGET=10.12" + CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DCMAKE_OSX_DEPLOYMENT_TARGET=10.15" CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DLLVM_CREATE_XCODE_TOOLCHAIN=OFF -DLLVM_EXTERNALIZE_DEBUGINFO=ON" fi if [[ $(uname -s) == 'Linux' ]]; then CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DLLVM_BINUTILS_INCDIR=${LLVM_BINUTILS_INCDIR}" - if [[ ${MAJOR_MINOR} == "3.8" ]] && [[ ${BUILD_AND_LINK_LIBCXX} == true ]]; then - # note: LIBCXX_ENABLE_STATIC_ABI_LIBRARY=ON is only needed with llvm < 3.9.0 to avoid libcxx(abi) build breaking when only a static libc++ exists - CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DLIBCXX_ENABLE_STATIC_ABI_LIBRARY=ON" - fi fi # Strip this since we set CMAKE_OSX_DEPLOYMENT_TARGET above. 
We assume that we'd only upgrade to use this compiler on recent OS X systems and we want the potential performance benefit of targeting a more recent version @@ -265,10 +246,9 @@ function mason_compile { # https://blogs.gentoo.org/gsoc2016-native-clang/2016/05/31/build-gnu-free-executables-with-clang/ if [[ ${BUILD_AND_LINK_LIBCXX} == true ]]; then - CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DLIBCXX_ENABLE_ASSERTIONS=OFF -DLIBUNWIND_ENABLE_ASSERTIONS=OFF -DLIBCXXABI_USE_COMPILER_RT=ON -DLIBCXX_USE_COMPILER_RT=ON -DLIBCXXABI_ENABLE_ASSERTIONS=OFF -DLIBCXX_ENABLE_SHARED=OFF -DLIBCXX_ENABLE_STATIC=ON -DLIBCXXABI_ENABLE_SHARED=OFF -DLIBCXXABI_USE_LLVM_UNWINDER=ON -DLIBCXXABI_ENABLE_STATIC_UNWINDER=ON -DSANITIZER_USE_COMPILER_RT=ON -DLIBUNWIND_USE_COMPILER_RT=ON -DLIBUNWIND_ENABLE_STATIC=ON -DLIBUNWIND_ENABLE_SHARED=OFF" + CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DLIBCXX_ENABLE_ASSERTIONS=OFF -DLIBUNWIND_ENABLE_ASSERTIONS=OFF -DLIBCXXABI_USE_COMPILER_RT=ON -DLIBCXX_USE_COMPILER_RT=ON -DLIBCXXABI_ENABLE_ASSERTIONS=OFF -DLIBCXX_ENABLE_SHARED=OFF -DLIBCXX_ENABLE_STATIC=ON -DLIBCXXABI_ENABLE_SHARED=OFF -DLIBCXXABI_USE_LLVM_UNWINDER=ON -DLIBCXXABI_ENABLE_STATIC_UNWINDER=ON -DLIBUNWIND_USE_COMPILER_RT=ON -DLIBUNWIND_ENABLE_STATIC=ON -DLIBUNWIND_ENABLE_SHARED=OFF" fi - if [[ $(uname -s) == 'Linux' ]]; then echo "fixing editline" # hack to ensure that lldb finds editline to avoid: @@ -299,14 +279,17 @@ function mason_compile { cd ./build export CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -G Ninja -DCMAKE_MAKE_PROGRAM=${MASON_NINJA}/bin/ninja -DLLVM_ENABLE_ASSERTIONS=OFF -DCLANG_VENDOR=mapbox/mason -DCMAKE_CXX_COMPILER_LAUNCHER=${MASON_CCACHE}/bin/ccache" - export CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} -DCMAKE_BUILD_TYPE=Release -DLLVM_INCLUDE_DOCS=OFF" - export CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DLLVM_TARGETS_TO_BUILD=BPF;X86 -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=WebAssembly -DCLANG_REPOSITORY_STRING=https://github.com/mapbox/mason 
-DCLANG_VENDOR_UTI=org.mapbox.llvm" - export CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DLLDB_RELOCATABLE_PYTHON=1 -DLLDB_DISABLE_PYTHON=1 -DLLVM_ENABLE_TERMINFO=0" + export CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} -DCMAKE_BUILD_TYPE=MinSizeRel -DLLVM_INCLUDE_DOCS=OFF" + export CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DLLVM_TARGETS_TO_BUILD=BPF;X86;WebAssembly -DCLANG_REPOSITORY_STRING=https://github.com/mapbox/mason -DCLANG_VENDOR_UTI=org.mapbox.llvm" + export CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DLLVM_ENABLE_TERMINFO=0 -DLLVM_INCLUDE_EXAMPLES=OFF -DLLVM_ENABLE_UNWIND_TABLES=OFF -DLLVM_ENABLE_EH=ON -DLLVM_ENABLE_RTTI=ON" # look for curses and libedit on linux # note: python would need swig export CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DCMAKE_PREFIX_PATH=${MASON_NCURSES};${MASON_LIBEDIT}" echo "running cmake configure for llvm+friends build" + echo + echo "All cmake options: '${CMAKE_EXTRA_ARGS}'" + echo if [[ $(uname -s) == 'Linux' ]]; then ${MASON_CMAKE}/bin/cmake ../ ${CMAKE_EXTRA_ARGS} \ -DCMAKE_CXX_STANDARD_LIBRARIES="-L${MASON_LIBEDIT}/lib -L${MASON_NCURSES}/lib -L$(pwd)/lib -lc++ -lc++abi -lunwind -pthread -lc -ldl -lrt -rtlib=compiler-rt" \ @@ -367,7 +350,7 @@ function mason_compile { -DCMAKE_INSTALL_PREFIX="${MASON_PREFIX}/asan" -DLLVM_USE_SANITIZER="Address;Undefined" \ -DLIBCXX_INSTALL_LIBRARY=ON -DLIBCXX_INSTALL_HEADERS=ON ${MASON_NINJA}/bin/ninja cxx cxxabi -j${MASON_CONCURRENCY} - ${MASON_NINJA}/bin/ninja install-cxx install-libcxxabi -j${MASON_CONCURRENCY} + ${MASON_NINJA}/bin/ninja install-cxx install-cxxabi -j${MASON_CONCURRENCY} # MemoryWithOrigins if [[ $(uname -s) == 'Darwin' ]]; then @@ -379,7 +362,7 @@ function mason_compile { -DCMAKE_INSTALL_PREFIX="${MASON_PREFIX}/msan" -DLLVM_USE_SANITIZER="MemoryWithOrigins" \ -DLIBCXX_INSTALL_LIBRARY=ON -DLIBCXX_INSTALL_HEADERS=ON ${MASON_NINJA}/bin/ninja cxx cxxabi -j${MASON_CONCURRENCY} - ${MASON_NINJA}/bin/ninja install-cxx install-libcxxabi -j${MASON_CONCURRENCY} + 
${MASON_NINJA}/bin/ninja install-cxx install-cxxabi -j${MASON_CONCURRENCY} fi # Thread @@ -389,7 +372,7 @@ function mason_compile { -DCMAKE_INSTALL_PREFIX="${MASON_PREFIX}/tsan" -DLLVM_USE_SANITIZER="Thread" \ -DLIBCXX_INSTALL_LIBRARY=ON -DLIBCXX_INSTALL_HEADERS=ON ${MASON_NINJA}/bin/ninja cxx cxxabi -j${MASON_CONCURRENCY} - ${MASON_NINJA}/bin/ninja install-cxx install-libcxxabi -j${MASON_CONCURRENCY} + ${MASON_NINJA}/bin/ninja install-cxx install-cxxabi -j${MASON_CONCURRENCY} } diff --git a/scripts/lua/5.1.0/script.sh b/scripts/lua/5.1.0/script.sh index 1193421eb..918248b27 100755 --- a/scripts/lua/5.1.0/script.sh +++ b/scripts/lua/5.1.0/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/liblua.a function mason_load_source { mason_download \ - http://www.lua.org/ftp/lua-5.1.tar.gz \ + https://www.lua.org/ftp/lua-5.1.tar.gz \ 3f8d5a84a38423829765512118bbf26c500b0c06 mason_extract_tar_gz diff --git a/scripts/lua/5.2.4/script.sh b/scripts/lua/5.2.4/script.sh index 03b864452..ac45bec3e 100755 --- a/scripts/lua/5.2.4/script.sh +++ b/scripts/lua/5.2.4/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/liblua.a function mason_load_source { mason_download \ - http://www.lua.org/ftp/lua-${MASON_VERSION}.tar.gz \ + https://www.lua.org/ftp/lua-${MASON_VERSION}.tar.gz \ 6dd4526fdae5a7f76e44febf4d3066614920c43e mason_extract_tar_gz diff --git a/scripts/lua/5.3.0/script.sh b/scripts/lua/5.3.0/script.sh index d755a6c50..688a24ad5 100755 --- a/scripts/lua/5.3.0/script.sh +++ b/scripts/lua/5.3.0/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/liblua.a function mason_load_source { mason_download \ - http://www.lua.org/ftp/lua-5.3.0.tar.gz \ + https://www.lua.org/ftp/lua-5.3.0.tar.gz \ 44ffcfd0f38445c76e5d58777089f392bed175c3 mason_extract_tar_gz diff --git a/scripts/lz4/1.8.2/.travis.yml b/scripts/lz4/1.8.2/.travis.yml new file mode 100644 index 000000000..3431d4fb7 --- /dev/null +++ b/scripts/lz4/1.8.2/.travis.yml @@ -0,0 +1,16 @@ +language: cpp + +sudo: false + +matrix: + include: + - os: osx + 
compiler: clang + - os: linux + compiler: clang + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/lz4/1.8.2/script.sh b/scripts/lz4/1.8.2/script.sh new file mode 100755 index 000000000..948aa47db --- /dev/null +++ b/scripts/lz4/1.8.2/script.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +MASON_NAME=lz4 +MASON_VERSION=1.8.2 +MASON_LIB_FILE=lib/liblz4.a +MASON_PKGCONFIG_FILE=lib/pkgconfig/liblz4.pc + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/lz4/lz4/archive/v1.8.2.tar.gz \ + 26676ba8d3e6c616dc2377afddc6ffb84c260d1d + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + PREFIX=${MASON_PREFIX} make BUILD_SHARED=no install -C lib +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/mapnik/26d3084ea/.travis.yml b/scripts/mapnik/26d3084ea/.travis.yml new file mode 100644 index 000000000..924a48996 --- /dev/null +++ b/scripts/mapnik/26d3084ea/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11.3 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + - xutils-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mapnik/26d3084ea/script.sh b/scripts/mapnik/26d3084ea/script.sh new file mode 100755 index 000000000..3fd89e835 --- /dev/null +++ b/scripts/mapnik/26d3084ea/script.sh @@ -0,0 +1,179 @@ +#!/usr/bin/env bash + +MASON_NAME=mapnik +MASON_VERSION=26d3084ea +MASON_LIB_FILE=lib/libmapnik.${MASON_DYNLIB_SUFFIX} + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapnik-v${MASON_VERSION} + if [[ ! 
-d ${MASON_BUILD_PATH} ]]; then + git clone https://github.com/mapnik/mapnik ${MASON_BUILD_PATH} + (cd ${MASON_BUILD_PATH} && git checkout ${MASON_VERSION} && git submodule update --init) + fi +} + +function install() { + ${MASON_DIR}/mason install $1 $2 + MASON_PLATFORM_ID=$(${MASON_DIR}/mason env MASON_PLATFORM_ID) + if [[ ! -d ${MASON_ROOT}/${MASON_PLATFORM_ID}/${1}/${2} ]]; then + if [[ ${3:-false} != false ]]; then + LA_FILE=$(${MASON_DIR}/mason prefix $1 $2)/lib/$3.la + if [[ -f ${LA_FILE} ]]; then + perl -i -p -e 's:\Q$ENV{HOME}/build/mapbox/mason\E:$ENV{PWD}:g' ${LA_FILE} + else + echo "$LA_FILE not found" + fi + fi + fi + ${MASON_DIR}/mason link $1 $2 +} + +ICU_VERSION="57.1" + +function mason_prepare_compile { + install jpeg_turbo 1.5.1 libjpeg + install libpng 1.6.28 libpng + install libtiff 4.0.7 libtiff + install libpq 9.6.2 + install sqlite 3.17.0 libsqlite3 + install expat 2.2.0 libexpat + install icu ${ICU_VERSION} + install proj 4.9.3 libproj + install pixman 0.34.0 libpixman-1 + install cairo 1.14.8 libcairo + install webp 0.6.0 libwebp + install libgdal 2.1.3 libgdal + install boost 1.66.0 + install boost_libsystem 1.66.0 + install boost_libfilesystem 1.66.0 + install boost_libprogram_options 1.66.0 + install boost_libregex_icu57 1.66.0 + install freetype 2.7.1 libfreetype + install harfbuzz 1.4.2-ft libharfbuzz +} + +function mason_compile { + export PATH="${MASON_ROOT}/.link/bin:${PATH}" + MASON_LINKED_REL="${MASON_ROOT}/.link" + MASON_LINKED_ABS="${MASON_ROOT}/.link" + + # The mapnik configure check for c++14 fails when mason hardcodes c++11 in the CXXFLAGS + # So we remove it here + export CXXFLAGS="${CXXFLAGS//-std=c++11}" + + if [[ $(uname -s) == 'Linux' ]]; then + echo "CUSTOM_LDFLAGS = '${LDFLAGS} -Wl,-z,origin -Wl,-rpath=\\\$\$ORIGIN/../lib/ -Wl,-rpath=\\\$\$ORIGIN/../../'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0'" >> config.py + else + echo "CUSTOM_LDFLAGS = '${LDFLAGS}'" > config.py + echo 
"CUSTOM_CXXFLAGS = '${CXXFLAGS}'" >> config.py + fi + + # setup `mapnik-settings.env` (like bootstrap.sh does) + # note: we don't use bootstrap.sh to be able to control + # mason versions here and use the mason we are running + echo "export PROJ_LIB=${MASON_LINKED_ABS}/share/proj" > mapnik-settings.env + echo "export ICU_DATA=${MASON_LINKED_ABS}/share/icu/${ICU_VERSION}" >> mapnik-settings.env + echo "export GDAL_DATA=${MASON_LINKED_ABS}/share/gdal" >> mapnik-settings.env + + RESULT=0 + + ./configure \ + CXX="${CXX}" \ + CC="${CC}" \ + PREFIX="${MASON_PREFIX}" \ + RUNTIME_LINK="static" \ + INPUT_PLUGINS="all" \ + ENABLE_SONAME=False \ + PKG_CONFIG_PATH="${MASON_LINKED_REL}/lib/pkgconfig" \ + PATH_REMOVE="/usr:/usr/local" \ + BOOST_INCLUDES="${MASON_LINKED_REL}/include" \ + BOOST_LIBS="${MASON_LINKED_REL}/lib" \ + ICU_INCLUDES="${MASON_LINKED_REL}/include" \ + ICU_LIBS="${MASON_LINKED_REL}/lib" \ + HB_INCLUDES="${MASON_LINKED_REL}/include" \ + HB_LIBS="${MASON_LINKED_REL}/lib" \ + PNG_INCLUDES="${MASON_LINKED_REL}/include/libpng16" \ + PNG_LIBS="${MASON_LINKED_REL}/lib" \ + JPEG_INCLUDES="${MASON_LINKED_REL}/include" \ + JPEG_LIBS="${MASON_LINKED_REL}/lib" \ + TIFF_INCLUDES="${MASON_LINKED_REL}/include" \ + TIFF_LIBS="${MASON_LINKED_REL}/lib" \ + WEBP_INCLUDES="${MASON_LINKED_REL}/include" \ + WEBP_LIBS="${MASON_LINKED_REL}/lib" \ + PROJ_INCLUDES="${MASON_LINKED_REL}/include" \ + PROJ_LIBS="${MASON_LINKED_REL}/lib" \ + PG_INCLUDES="${MASON_LINKED_REL}/include" \ + PG_LIBS="${MASON_LINKED_REL}/lib" \ + FREETYPE_INCLUDES="${MASON_LINKED_REL}/include/freetype2" \ + FREETYPE_LIBS="${MASON_LINKED_REL}/lib" \ + SVG_RENDERER=True \ + CAIRO_INCLUDES="${MASON_LINKED_REL}/include" \ + CAIRO_LIBS="${MASON_LINKED_REL}/lib" \ + SQLITE_INCLUDES="${MASON_LINKED_REL}/include" \ + SQLITE_LIBS="${MASON_LINKED_REL}/lib" \ + GDAL_CONFIG="${MASON_LINKED_REL}/bin/gdal-config" \ + PG_CONFIG="${MASON_LINKED_REL}/bin/pg_config" \ + BENCHMARK=False \ + CPP_TESTS=False \ + PGSQL2SQLITE=True \ 
+ SAMPLE_INPUT_PLUGINS=False \ + DEMO=False \ + XMLPARSER="ptree" \ + NO_ATEXIT=True \ + SVG2PNG=True || RESULT=$? + + # if configure failed, dump out config details before exiting + if [[ ${RESULT} != 0 ]]; then + cat ${MASON_BUILD_PATH}"/config.log" + cat config.py + false # then fail + fi + + # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + JOBS=4 make + else + JOBS=${MASON_CONCURRENCY} make + fi + + make install + if [[ $(uname -s) == 'Darwin' ]]; then + install_name_tool -id @loader_path/lib/libmapnik.dylib ${MASON_PREFIX}"/lib/libmapnik.dylib"; + PLUGINDIRS=${MASON_PREFIX}"/lib/mapnik/input/*.input"; + for f in $PLUGINDIRS; do + echo $f; + echo `basename $f`; + install_name_tool -id plugins/input/`basename $f` $f; + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../../../lib/libmapnik.dylib $f; + done; + # command line tools + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-index" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-render" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/shapeindex" + fi + # fix mapnik-config entries for deps + HERE=$(pwd) + python -c "import re;data=open('$MASON_PREFIX/bin/mapnik-config','r').read();data=re.sub(r'-(isysroot)\s\/([0-9a-zA-Z_\/\-\.]+)', '', data);open('$MASON_PREFIX/bin/mapnik-config','w').write(data.replace('$HERE','.').replace('${MASON_ROOT}','./mason_packages'))" + cat $MASON_PREFIX/bin/mapnik-config +} + +function mason_cflags { + ${MASON_PREFIX}/bin/mapnik-config --cflags +} + +function mason_ldflags { + ${MASON_PREFIX}/bin/mapnik-config --ldflags +} + +function mason_static_libs { + ${MASON_PREFIX}/bin/mapnik-config --dep-libs +} + +function mason_clean { + make clean +} + +mason_run "$@" diff 
--git a/scripts/mapnik/3.0.13-1/script.sh b/scripts/mapnik/3.0.13-1/script.sh index 56876f259..122f38cf0 100755 --- a/scripts/mapnik/3.0.13-1/script.sh +++ b/scripts/mapnik/3.0.13-1/script.sh @@ -17,7 +17,7 @@ function mason_load_source { #mkdir -p $(dirname ${MASON_BUILD_PATH}) #if [[ ! -d ${MASON_BUILD_PATH} ]]; then - # git clone -b 3.0.x-mason-upgrades --single-branch http://github.com/mapnik/mapnik ${MASON_BUILD_PATH} + # git clone -b 3.0.x-mason-upgrades --single-branch https://github.com/mapnik/mapnik ${MASON_BUILD_PATH} # (cd ${MASON_BUILD_PATH} && git submodule update --init deps/mapbox/variant/) #fi } diff --git a/scripts/mapnik/3.0.13-2/script.sh b/scripts/mapnik/3.0.13-2/script.sh index 086d2a739..c33e77d59 100755 --- a/scripts/mapnik/3.0.13-2/script.sh +++ b/scripts/mapnik/3.0.13-2/script.sh @@ -17,7 +17,7 @@ function mason_load_source { #mkdir -p $(dirname ${MASON_BUILD_PATH}) #if [[ ! -d ${MASON_BUILD_PATH} ]]; then - # git clone -b 3.0.x-mason-upgrades --single-branch http://github.com/mapnik/mapnik ${MASON_BUILD_PATH} + # git clone -b 3.0.x-mason-upgrades --single-branch https://github.com/mapnik/mapnik ${MASON_BUILD_PATH} # (cd ${MASON_BUILD_PATH} && git submodule update --init deps/mapbox/variant/) #fi } diff --git a/scripts/mapnik/3.0.13-3/script.sh b/scripts/mapnik/3.0.13-3/script.sh index 9d2cd6a57..5b8a222f7 100755 --- a/scripts/mapnik/3.0.13-3/script.sh +++ b/scripts/mapnik/3.0.13-3/script.sh @@ -17,7 +17,7 @@ function mason_load_source { #mkdir -p $(dirname ${MASON_BUILD_PATH}) #if [[ ! 
-d ${MASON_BUILD_PATH} ]]; then - # git clone -b 3.0.x-mason-upgrades --single-branch http://github.com/mapnik/mapnik ${MASON_BUILD_PATH} + # git clone -b 3.0.x-mason-upgrades --single-branch https://github.com/mapnik/mapnik ${MASON_BUILD_PATH} # (cd ${MASON_BUILD_PATH} && git submodule update --init deps/mapbox/variant/) #fi } diff --git a/scripts/mapnik/3.0.13/script.sh b/scripts/mapnik/3.0.13/script.sh index f683fd5e1..bf44939d4 100755 --- a/scripts/mapnik/3.0.13/script.sh +++ b/scripts/mapnik/3.0.13/script.sh @@ -17,7 +17,7 @@ function mason_load_source { #mkdir -p $(dirname ${MASON_BUILD_PATH}) #if [[ ! -d ${MASON_BUILD_PATH} ]]; then - # git clone -b 3.0.x-mason-upgrades --single-branch http://github.com/mapnik/mapnik ${MASON_BUILD_PATH} + # git clone -b 3.0.x-mason-upgrades --single-branch https://github.com/mapnik/mapnik ${MASON_BUILD_PATH} # (cd ${MASON_BUILD_PATH} && git submodule update --init deps/mapbox/variant/) #fi } diff --git a/scripts/mapnik/3.0.20/.travis.yml b/scripts/mapnik/3.0.20/.travis.yml new file mode 100644 index 000000000..629abe9b1 --- /dev/null +++ b/scripts/mapnik/3.0.20/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + - xutils-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mapnik/3.0.20/script.sh b/scripts/mapnik/3.0.20/script.sh new file mode 100755 index 000000000..24c8ba904 --- /dev/null +++ b/scripts/mapnik/3.0.20/script.sh @@ -0,0 +1,177 @@ +#!/usr/bin/env bash + +MASON_NAME=mapnik +MASON_VERSION=3.0.20 +MASON_LIB_FILE=lib/libmapnik.${MASON_DYNLIB_SUFFIX} + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapnik/mapnik/releases/download/v${MASON_VERSION}/mapnik-v${MASON_VERSION}.tar.bz2 \ + d75452b32c0376d105f40160432a1f9188fc8326 + mason_extract_tar_bz2 + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapnik-v${MASON_VERSION} +} + +function install() { + ${MASON_DIR}/mason install $1 $2 + MASON_PLATFORM_ID=$(${MASON_DIR}/mason env MASON_PLATFORM_ID) + if [[ ! -d ${MASON_ROOT}/${MASON_PLATFORM_ID}/${1}/${2} ]]; then + if [[ ${3:-false} != false ]]; then + LA_FILE=$(${MASON_DIR}/mason prefix $1 $2)/lib/$3.la + if [[ -f ${LA_FILE} ]]; then + perl -i -p -e 's:\Q$ENV{HOME}/build/mapbox/mason\E:$ENV{PWD}:g' ${LA_FILE} + else + echo "$LA_FILE not found" + fi + fi + fi + ${MASON_DIR}/mason link $1 $2 +} + +ICU_VERSION="57.1" + +function mason_prepare_compile { + install jpeg_turbo 1.5.1 libjpeg + install libpng 1.6.28 libpng + install libtiff 4.0.7 libtiff + install libpq 9.6.2 + install sqlite 3.17.0 libsqlite3 + install expat 2.2.0 libexpat + install icu ${ICU_VERSION} + install proj 4.9.3 libproj + install pixman 0.34.0 libpixman-1 + install cairo 1.14.8 libcairo + install webp 0.6.0 libwebp + install libgdal 2.1.3 libgdal + install boost 1.65.1 + install boost_libsystem 1.65.1 + install boost_libfilesystem 1.65.1 + install boost_libprogram_options 1.65.1 + install boost_libregex_icu57 1.65.1 + install freetype 2.7.1 libfreetype + install harfbuzz 1.4.2-ft libharfbuzz +} + +function mason_compile { + export PATH="${MASON_ROOT}/.link/bin:${PATH}" + MASON_LINKED_REL="${MASON_ROOT}/.link" + MASON_LINKED_ABS="${MASON_ROOT}/.link" + + if [[ $(uname -s) == 'Linux' ]]; then + echo "CUSTOM_LDFLAGS = '${LDFLAGS} -Wl,-z,origin -Wl,-rpath=\\\$\$ORIGIN/../lib/ -Wl,-rpath=\\\$\$ORIGIN/../../'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0'" >> config.py + else + echo "CUSTOM_LDFLAGS = '${LDFLAGS}'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS}'" >> 
config.py + fi + + # setup `mapnik-settings.env` (like bootstrap.sh does) + # note: we don't use bootstrap.sh to be able to control + # mason versions here and use the mason we are running + echo "export PROJ_LIB=${MASON_LINKED_ABS}/share/proj" > mapnik-settings.env + echo "export ICU_DATA=${MASON_LINKED_ABS}/share/icu/${ICU_VERSION}" >> mapnik-settings.env + echo "export GDAL_DATA=${MASON_LINKED_ABS}/share/gdal" >> mapnik-settings.env + + RESULT=0 + + ./configure \ + CXX="${CXX}" \ + CC="${CC}" \ + PREFIX="${MASON_PREFIX}" \ + RUNTIME_LINK="static" \ + INPUT_PLUGINS="all" \ + ENABLE_GLIBC_WORKAROUND=True \ + ENABLE_SONAME=False \ + PKG_CONFIG_PATH="${MASON_LINKED_REL}/lib/pkgconfig" \ + PATH_REMOVE="/usr:/usr/local" \ + BOOST_INCLUDES="${MASON_LINKED_REL}/include" \ + BOOST_LIBS="${MASON_LINKED_REL}/lib" \ + ICU_INCLUDES="${MASON_LINKED_REL}/include" \ + ICU_LIBS="${MASON_LINKED_REL}/lib" \ + HB_INCLUDES="${MASON_LINKED_REL}/include" \ + HB_LIBS="${MASON_LINKED_REL}/lib" \ + PNG_INCLUDES="${MASON_LINKED_REL}/include/libpng16" \ + PNG_LIBS="${MASON_LINKED_REL}/lib" \ + JPEG_INCLUDES="${MASON_LINKED_REL}/include" \ + JPEG_LIBS="${MASON_LINKED_REL}/lib" \ + TIFF_INCLUDES="${MASON_LINKED_REL}/include" \ + TIFF_LIBS="${MASON_LINKED_REL}/lib" \ + WEBP_INCLUDES="${MASON_LINKED_REL}/include" \ + WEBP_LIBS="${MASON_LINKED_REL}/lib" \ + PROJ_INCLUDES="${MASON_LINKED_REL}/include" \ + PROJ_LIBS="${MASON_LINKED_REL}/lib" \ + PG_INCLUDES="${MASON_LINKED_REL}/include" \ + PG_LIBS="${MASON_LINKED_REL}/lib" \ + FREETYPE_INCLUDES="${MASON_LINKED_REL}/include/freetype2" \ + FREETYPE_LIBS="${MASON_LINKED_REL}/lib" \ + SVG_RENDERER=True \ + CAIRO_INCLUDES="${MASON_LINKED_REL}/include" \ + CAIRO_LIBS="${MASON_LINKED_REL}/lib" \ + SQLITE_INCLUDES="${MASON_LINKED_REL}/include" \ + SQLITE_LIBS="${MASON_LINKED_REL}/lib" \ + GDAL_CONFIG="${MASON_LINKED_REL}/bin/gdal-config" \ + PG_CONFIG="${MASON_LINKED_REL}/bin/pg_config" \ + BENCHMARK=False \ + CPP_TESTS=False \ + PGSQL2SQLITE=True \ + 
SAMPLE_INPUT_PLUGINS=False \ + DEMO=False \ + XMLPARSER="ptree" \ + NO_ATEXIT=True \ + SVG2PNG=True || RESULT=$? + + # if configure failed, dump out config details before exiting + if [[ ${RESULT} != 0 ]]; then + cat ${MASON_BUILD_PATH}"/config.log" + cat config.py + false # then fail + fi + + # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + JOBS=4 make + else + JOBS=${MASON_CONCURRENCY} make + fi + + make install + if [[ $(uname -s) == 'Darwin' ]]; then + install_name_tool -id @loader_path/lib/libmapnik.dylib ${MASON_PREFIX}"/lib/libmapnik.dylib"; + PLUGINDIRS=${MASON_PREFIX}"/lib/mapnik/input/*.input"; + for f in $PLUGINDIRS; do + echo $f; + echo `basename $f`; + install_name_tool -id plugins/input/`basename $f` $f; + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../../../lib/libmapnik.dylib $f; + done; + # command line tools + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-index" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-render" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/shapeindex" + fi + # fix mapnik-config entries for deps + HERE=$(pwd) + python -c "import re;data=open('$MASON_PREFIX/bin/mapnik-config','r').read();data=re.sub(r'-(isysroot)\s\/([0-9a-zA-Z_\/\-\.]+)', '', data);open('$MASON_PREFIX/bin/mapnik-config','w').write(data.replace('$HERE','.').replace('${MASON_ROOT}','./mason_packages'))" + cat $MASON_PREFIX/bin/mapnik-config +} + +function mason_cflags { + ${MASON_PREFIX}/bin/mapnik-config --cflags +} + +function mason_ldflags { + ${MASON_PREFIX}/bin/mapnik-config --ldflags +} + +function mason_static_libs { + ${MASON_PREFIX}/bin/mapnik-config --dep-libs +} + +function mason_clean { + make clean +} + +mason_run "$@" diff 
--git a/scripts/mapnik/3.0.21/.travis.yml b/scripts/mapnik/3.0.21/.travis.yml new file mode 100644 index 000000000..629abe9b1 --- /dev/null +++ b/scripts/mapnik/3.0.21/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + - xutils-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mapnik/3.0.21/script.sh b/scripts/mapnik/3.0.21/script.sh new file mode 100755 index 000000000..040fc8601 --- /dev/null +++ b/scripts/mapnik/3.0.21/script.sh @@ -0,0 +1,177 @@ +#!/usr/bin/env bash + +MASON_NAME=mapnik +MASON_VERSION=3.0.21 +MASON_LIB_FILE=lib/libmapnik.${MASON_DYNLIB_SUFFIX} + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapnik/mapnik/releases/download/v${MASON_VERSION}/mapnik-v${MASON_VERSION}.tar.bz2 \ + 712b7a96bd425d22a40c17537ad0c4e92d695a9f + mason_extract_tar_bz2 + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapnik-v${MASON_VERSION} +} + +function install() { + ${MASON_DIR}/mason install $1 $2 + MASON_PLATFORM_ID=$(${MASON_DIR}/mason env MASON_PLATFORM_ID) + if [[ ! 
-d ${MASON_ROOT}/${MASON_PLATFORM_ID}/${1}/${2} ]]; then + if [[ ${3:-false} != false ]]; then + LA_FILE=$(${MASON_DIR}/mason prefix $1 $2)/lib/$3.la + if [[ -f ${LA_FILE} ]]; then + perl -i -p -e 's:\Q$ENV{HOME}/build/mapbox/mason\E:$ENV{PWD}:g' ${LA_FILE} + else + echo "$LA_FILE not found" + fi + fi + fi + ${MASON_DIR}/mason link $1 $2 +} + +ICU_VERSION="57.1" + +function mason_prepare_compile { + install jpeg_turbo 1.5.1 libjpeg + install libpng 1.6.28 libpng + install libtiff 4.0.7 libtiff + install libpq 9.6.2 + install sqlite 3.17.0 libsqlite3 + install expat 2.2.0 libexpat + install icu ${ICU_VERSION} + install proj 4.9.3 libproj + install pixman 0.34.0 libpixman-1 + install cairo 1.14.8 libcairo + install webp 0.6.0 libwebp + install libgdal 2.1.3 libgdal + install boost 1.65.1 + install boost_libsystem 1.65.1 + install boost_libfilesystem 1.65.1 + install boost_libprogram_options 1.65.1 + install boost_libregex_icu57 1.65.1 + install freetype 2.7.1 libfreetype + install harfbuzz 1.4.2-ft libharfbuzz +} + +function mason_compile { + export PATH="${MASON_ROOT}/.link/bin:${PATH}" + MASON_LINKED_REL="${MASON_ROOT}/.link" + MASON_LINKED_ABS="${MASON_ROOT}/.link" + + if [[ $(uname -s) == 'Linux' ]]; then + echo "CUSTOM_LDFLAGS = '${LDFLAGS} -Wl,-z,origin -Wl,-rpath=\\\$\$ORIGIN/../lib/ -Wl,-rpath=\\\$\$ORIGIN/../../'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0'" >> config.py + else + echo "CUSTOM_LDFLAGS = '${LDFLAGS}'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS}'" >> config.py + fi + + # setup `mapnik-settings.env` (like bootstrap.sh does) + # note: we don't use bootstrap.sh to be able to control + # mason versions here and use the mason we are running + echo "export PROJ_LIB=${MASON_LINKED_ABS}/share/proj" > mapnik-settings.env + echo "export ICU_DATA=${MASON_LINKED_ABS}/share/icu/${ICU_VERSION}" >> mapnik-settings.env + echo "export GDAL_DATA=${MASON_LINKED_ABS}/share/gdal" >> mapnik-settings.env + + RESULT=0 + + 
./configure \ + CXX="${CXX}" \ + CC="${CC}" \ + PREFIX="${MASON_PREFIX}" \ + RUNTIME_LINK="static" \ + INPUT_PLUGINS="all" \ + ENABLE_GLIBC_WORKAROUND=True \ + ENABLE_SONAME=False \ + PKG_CONFIG_PATH="${MASON_LINKED_REL}/lib/pkgconfig" \ + PATH_REMOVE="/usr:/usr/local" \ + BOOST_INCLUDES="${MASON_LINKED_REL}/include" \ + BOOST_LIBS="${MASON_LINKED_REL}/lib" \ + ICU_INCLUDES="${MASON_LINKED_REL}/include" \ + ICU_LIBS="${MASON_LINKED_REL}/lib" \ + HB_INCLUDES="${MASON_LINKED_REL}/include" \ + HB_LIBS="${MASON_LINKED_REL}/lib" \ + PNG_INCLUDES="${MASON_LINKED_REL}/include/libpng16" \ + PNG_LIBS="${MASON_LINKED_REL}/lib" \ + JPEG_INCLUDES="${MASON_LINKED_REL}/include" \ + JPEG_LIBS="${MASON_LINKED_REL}/lib" \ + TIFF_INCLUDES="${MASON_LINKED_REL}/include" \ + TIFF_LIBS="${MASON_LINKED_REL}/lib" \ + WEBP_INCLUDES="${MASON_LINKED_REL}/include" \ + WEBP_LIBS="${MASON_LINKED_REL}/lib" \ + PROJ_INCLUDES="${MASON_LINKED_REL}/include" \ + PROJ_LIBS="${MASON_LINKED_REL}/lib" \ + PG_INCLUDES="${MASON_LINKED_REL}/include" \ + PG_LIBS="${MASON_LINKED_REL}/lib" \ + FREETYPE_INCLUDES="${MASON_LINKED_REL}/include/freetype2" \ + FREETYPE_LIBS="${MASON_LINKED_REL}/lib" \ + SVG_RENDERER=True \ + CAIRO_INCLUDES="${MASON_LINKED_REL}/include" \ + CAIRO_LIBS="${MASON_LINKED_REL}/lib" \ + SQLITE_INCLUDES="${MASON_LINKED_REL}/include" \ + SQLITE_LIBS="${MASON_LINKED_REL}/lib" \ + GDAL_CONFIG="${MASON_LINKED_REL}/bin/gdal-config" \ + PG_CONFIG="${MASON_LINKED_REL}/bin/pg_config" \ + BENCHMARK=False \ + CPP_TESTS=False \ + PGSQL2SQLITE=True \ + SAMPLE_INPUT_PLUGINS=False \ + DEMO=False \ + XMLPARSER="ptree" \ + NO_ATEXIT=True \ + SVG2PNG=True || RESULT=$? 
+ + # if configure failed, dump out config details before exiting + if [[ ${RESULT} != 0 ]]; then + cat ${MASON_BUILD_PATH}"/config.log" + cat config.py + false # then fail + fi + + # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + JOBS=4 make + else + JOBS=${MASON_CONCURRENCY} make + fi + + make install + if [[ $(uname -s) == 'Darwin' ]]; then + install_name_tool -id @loader_path/lib/libmapnik.dylib ${MASON_PREFIX}"/lib/libmapnik.dylib"; + PLUGINDIRS=${MASON_PREFIX}"/lib/mapnik/input/*.input"; + for f in $PLUGINDIRS; do + echo $f; + echo `basename $f`; + install_name_tool -id plugins/input/`basename $f` $f; + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../../../lib/libmapnik.dylib $f; + done; + # command line tools + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-index" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-render" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/shapeindex" + fi + # fix mapnik-config entries for deps + HERE=$(pwd) + python -c "import re;data=open('$MASON_PREFIX/bin/mapnik-config','r').read();data=re.sub(r'-(isysroot)\s\/([0-9a-zA-Z_\/\-\.]+)', '', data);open('$MASON_PREFIX/bin/mapnik-config','w').write(data.replace('$HERE','.').replace('${MASON_ROOT}','./mason_packages'))" + cat $MASON_PREFIX/bin/mapnik-config +} + +function mason_cflags { + ${MASON_PREFIX}/bin/mapnik-config --cflags +} + +function mason_ldflags { + ${MASON_PREFIX}/bin/mapnik-config --ldflags +} + +function mason_static_libs { + ${MASON_PREFIX}/bin/mapnik-config --dep-libs +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/mapnik/3be9ce8fa/.travis.yml b/scripts/mapnik/3be9ce8fa/.travis.yml new file mode 100644 index 
000000000..924a48996 --- /dev/null +++ b/scripts/mapnik/3be9ce8fa/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11.3 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + - xutils-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mapnik/3be9ce8fa/script.sh b/scripts/mapnik/3be9ce8fa/script.sh new file mode 100755 index 000000000..e362d0a78 --- /dev/null +++ b/scripts/mapnik/3be9ce8fa/script.sh @@ -0,0 +1,179 @@ +#!/usr/bin/env bash + +MASON_NAME=mapnik +MASON_VERSION=3be9ce8fa +MASON_LIB_FILE=lib/libmapnik.${MASON_DYNLIB_SUFFIX} + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapnik-v${MASON_VERSION} + if [[ ! -d ${MASON_BUILD_PATH} ]]; then + git clone https://github.com/mapnik/mapnik ${MASON_BUILD_PATH} + (cd ${MASON_BUILD_PATH} && git checkout ${MASON_VERSION} && git submodule update --init) + fi +} + +function install() { + ${MASON_DIR}/mason install $1 $2 + MASON_PLATFORM_ID=$(${MASON_DIR}/mason env MASON_PLATFORM_ID) + if [[ ! 
-d ${MASON_ROOT}/${MASON_PLATFORM_ID}/${1}/${2} ]]; then + if [[ ${3:-false} != false ]]; then + LA_FILE=$(${MASON_DIR}/mason prefix $1 $2)/lib/$3.la + if [[ -f ${LA_FILE} ]]; then + perl -i -p -e 's:\Q$ENV{HOME}/build/mapbox/mason\E:$ENV{PWD}:g' ${LA_FILE} + else + echo "$LA_FILE not found" + fi + fi + fi + ${MASON_DIR}/mason link $1 $2 +} + +ICU_VERSION="57.1" + +function mason_prepare_compile { + install jpeg_turbo 1.5.1 libjpeg + install libpng 1.6.28 libpng + install libtiff 4.0.7 libtiff + install libpq 9.6.2 + install sqlite 3.17.0 libsqlite3 + install expat 2.2.0 libexpat + install icu ${ICU_VERSION} + install proj 4.9.3 libproj + install pixman 0.34.0 libpixman-1 + install cairo 1.14.8 libcairo + install webp 0.6.0 libwebp + install libgdal 2.1.3 libgdal + install boost 1.66.0 + install boost_libsystem 1.66.0 + install boost_libfilesystem 1.66.0 + install boost_libprogram_options 1.66.0 + install boost_libregex_icu57 1.66.0 + install freetype 2.7.1 libfreetype + install harfbuzz 1.4.2-ft libharfbuzz +} + +function mason_compile { + export PATH="${MASON_ROOT}/.link/bin:${PATH}" + MASON_LINKED_REL="${MASON_ROOT}/.link" + MASON_LINKED_ABS="${MASON_ROOT}/.link" + + # The mapnik configure check for c++14 fails when mason hardcodes c++11 in the CXXFLAGS + # So we remove it here + export CXXFLAGS="${CXXFLAGS//-std=c++11}" + + if [[ $(uname -s) == 'Linux' ]]; then + echo "CUSTOM_LDFLAGS = '${LDFLAGS} -Wl,-z,origin -Wl,-rpath=\\\$\$ORIGIN/../lib/ -Wl,-rpath=\\\$\$ORIGIN/../../'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0'" >> config.py + else + echo "CUSTOM_LDFLAGS = '${LDFLAGS}'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS}'" >> config.py + fi + + # setup `mapnik-settings.env` (like bootstrap.sh does) + # note: we don't use bootstrap.sh to be able to control + # mason versions here and use the mason we are running + echo "export PROJ_LIB=${MASON_LINKED_ABS}/share/proj" > mapnik-settings.env + echo "export 
ICU_DATA=${MASON_LINKED_ABS}/share/icu/${ICU_VERSION}" >> mapnik-settings.env + echo "export GDAL_DATA=${MASON_LINKED_ABS}/share/gdal" >> mapnik-settings.env + + RESULT=0 + + ./configure \ + CXX="${CXX}" \ + CC="${CC}" \ + PREFIX="${MASON_PREFIX}" \ + RUNTIME_LINK="static" \ + INPUT_PLUGINS="all" \ + ENABLE_SONAME=False \ + PKG_CONFIG_PATH="${MASON_LINKED_REL}/lib/pkgconfig" \ + PATH_REMOVE="/usr:/usr/local" \ + BOOST_INCLUDES="${MASON_LINKED_REL}/include" \ + BOOST_LIBS="${MASON_LINKED_REL}/lib" \ + ICU_INCLUDES="${MASON_LINKED_REL}/include" \ + ICU_LIBS="${MASON_LINKED_REL}/lib" \ + HB_INCLUDES="${MASON_LINKED_REL}/include" \ + HB_LIBS="${MASON_LINKED_REL}/lib" \ + PNG_INCLUDES="${MASON_LINKED_REL}/include/libpng16" \ + PNG_LIBS="${MASON_LINKED_REL}/lib" \ + JPEG_INCLUDES="${MASON_LINKED_REL}/include" \ + JPEG_LIBS="${MASON_LINKED_REL}/lib" \ + TIFF_INCLUDES="${MASON_LINKED_REL}/include" \ + TIFF_LIBS="${MASON_LINKED_REL}/lib" \ + WEBP_INCLUDES="${MASON_LINKED_REL}/include" \ + WEBP_LIBS="${MASON_LINKED_REL}/lib" \ + PROJ_INCLUDES="${MASON_LINKED_REL}/include" \ + PROJ_LIBS="${MASON_LINKED_REL}/lib" \ + PG_INCLUDES="${MASON_LINKED_REL}/include" \ + PG_LIBS="${MASON_LINKED_REL}/lib" \ + FREETYPE_INCLUDES="${MASON_LINKED_REL}/include/freetype2" \ + FREETYPE_LIBS="${MASON_LINKED_REL}/lib" \ + SVG_RENDERER=True \ + CAIRO_INCLUDES="${MASON_LINKED_REL}/include" \ + CAIRO_LIBS="${MASON_LINKED_REL}/lib" \ + SQLITE_INCLUDES="${MASON_LINKED_REL}/include" \ + SQLITE_LIBS="${MASON_LINKED_REL}/lib" \ + GDAL_CONFIG="${MASON_LINKED_REL}/bin/gdal-config" \ + PG_CONFIG="${MASON_LINKED_REL}/bin/pg_config" \ + BENCHMARK=False \ + CPP_TESTS=False \ + PGSQL2SQLITE=True \ + SAMPLE_INPUT_PLUGINS=False \ + DEMO=False \ + XMLPARSER="ptree" \ + NO_ATEXIT=True \ + SVG2PNG=True || RESULT=$? 
+ + # if configure failed, dump out config details before exiting + if [[ ${RESULT} != 0 ]]; then + cat ${MASON_BUILD_PATH}"/config.log" + cat config.py + false # then fail + fi + + # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + JOBS=4 make + else + JOBS=${MASON_CONCURRENCY} make + fi + + make install + if [[ $(uname -s) == 'Darwin' ]]; then + install_name_tool -id @loader_path/lib/libmapnik.dylib ${MASON_PREFIX}"/lib/libmapnik.dylib"; + PLUGINDIRS=${MASON_PREFIX}"/lib/mapnik/input/*.input"; + for f in $PLUGINDIRS; do + echo $f; + echo `basename $f`; + install_name_tool -id plugins/input/`basename $f` $f; + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../../../lib/libmapnik.dylib $f; + done; + # command line tools + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-index" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-render" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/shapeindex" + fi + # fix mapnik-config entries for deps + HERE=$(pwd) + python -c "import re;data=open('$MASON_PREFIX/bin/mapnik-config','r').read();data=re.sub(r'-(isysroot)\s\/([0-9a-zA-Z_\/\-\.]+)', '', data);open('$MASON_PREFIX/bin/mapnik-config','w').write(data.replace('$HERE','.').replace('${MASON_ROOT}','./mason_packages'))" + cat $MASON_PREFIX/bin/mapnik-config +} + +function mason_cflags { + ${MASON_PREFIX}/bin/mapnik-config --cflags +} + +function mason_ldflags { + ${MASON_PREFIX}/bin/mapnik-config --ldflags +} + +function mason_static_libs { + ${MASON_PREFIX}/bin/mapnik-config --dep-libs +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/mapnik/434511c/.travis.yml b/scripts/mapnik/434511c/.travis.yml new file mode 100644 index 
000000000..d960a4be4 --- /dev/null +++ b/scripts/mapnik/434511c/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode9.3 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + - xutils-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mapnik/434511c/script.sh b/scripts/mapnik/434511c/script.sh new file mode 100755 index 000000000..e6aae0388 --- /dev/null +++ b/scripts/mapnik/434511c/script.sh @@ -0,0 +1,179 @@ +#!/usr/bin/env bash + +MASON_NAME=mapnik +MASON_VERSION=434511c +MASON_LIB_FILE=lib/libmapnik.${MASON_DYNLIB_SUFFIX} + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapnik-v${MASON_VERSION} + if [[ ! -d ${MASON_BUILD_PATH} ]]; then + git clone https://github.com/mapnik/mapnik ${MASON_BUILD_PATH} + (cd ${MASON_BUILD_PATH} && git checkout ${MASON_VERSION} && git submodule update --init) + fi +} + +function install() { + ${MASON_DIR}/mason install $1 $2 + MASON_PLATFORM_ID=$(${MASON_DIR}/mason env MASON_PLATFORM_ID) + if [[ ! 
-d ${MASON_ROOT}/${MASON_PLATFORM_ID}/${1}/${2} ]]; then + if [[ ${3:-false} != false ]]; then + LA_FILE=$(${MASON_DIR}/mason prefix $1 $2)/lib/$3.la + if [[ -f ${LA_FILE} ]]; then + perl -i -p -e 's:\Q$ENV{HOME}/build/mapbox/mason\E:$ENV{PWD}:g' ${LA_FILE} + else + echo "$LA_FILE not found" + fi + fi + fi + ${MASON_DIR}/mason link $1 $2 +} + +ICU_VERSION="57.1" + +function mason_prepare_compile { + install jpeg_turbo 1.5.1 libjpeg + install libpng 1.6.28 libpng + install libtiff 4.0.7 libtiff + install libpq 9.6.2 + install sqlite 3.17.0 libsqlite3 + install expat 2.2.0 libexpat + install icu ${ICU_VERSION} + install proj 4.9.3 libproj + install pixman 0.34.0 libpixman-1 + install cairo 1.14.8 libcairo + install webp 0.6.0 libwebp + install libgdal 2.1.3 libgdal + install boost 1.66.0 + install boost_libsystem 1.66.0 + install boost_libfilesystem 1.66.0 + install boost_libprogram_options 1.66.0 + install boost_libregex_icu57 1.66.0 + install freetype 2.7.1 libfreetype + install harfbuzz 1.4.2-ft libharfbuzz +} + +function mason_compile { + export PATH="${MASON_ROOT}/.link/bin:${PATH}" + MASON_LINKED_REL="${MASON_ROOT}/.link" + MASON_LINKED_ABS="${MASON_ROOT}/.link" + + # The mapnik configure check for c++14 fails when mason hardcodes c++11 in the CXXFLAGS + # So we remove it here + export CXXFLAGS="${CXXFLAGS//-std=c++11}" + + if [[ $(uname -s) == 'Linux' ]]; then + echo "CUSTOM_LDFLAGS = '${LDFLAGS} -Wl,-z,origin -Wl,-rpath=\\\$\$ORIGIN/../lib/ -Wl,-rpath=\\\$\$ORIGIN/../../'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0'" >> config.py + else + echo "CUSTOM_LDFLAGS = '${LDFLAGS}'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS}'" >> config.py + fi + + # setup `mapnik-settings.env` (like bootstrap.sh does) + # note: we don't use bootstrap.sh to be able to control + # mason versions here and use the mason we are running + echo "export PROJ_LIB=${MASON_LINKED_ABS}/share/proj" > mapnik-settings.env + echo "export 
ICU_DATA=${MASON_LINKED_ABS}/share/icu/${ICU_VERSION}" >> mapnik-settings.env + echo "export GDAL_DATA=${MASON_LINKED_ABS}/share/gdal" >> mapnik-settings.env + + RESULT=0 + + ./configure \ + CXX="${CXX}" \ + CC="${CC}" \ + PREFIX="${MASON_PREFIX}" \ + RUNTIME_LINK="static" \ + INPUT_PLUGINS="all" \ + ENABLE_SONAME=False \ + PKG_CONFIG_PATH="${MASON_LINKED_REL}/lib/pkgconfig" \ + PATH_REMOVE="/usr:/usr/local" \ + BOOST_INCLUDES="${MASON_LINKED_REL}/include" \ + BOOST_LIBS="${MASON_LINKED_REL}/lib" \ + ICU_INCLUDES="${MASON_LINKED_REL}/include" \ + ICU_LIBS="${MASON_LINKED_REL}/lib" \ + HB_INCLUDES="${MASON_LINKED_REL}/include" \ + HB_LIBS="${MASON_LINKED_REL}/lib" \ + PNG_INCLUDES="${MASON_LINKED_REL}/include/libpng16" \ + PNG_LIBS="${MASON_LINKED_REL}/lib" \ + JPEG_INCLUDES="${MASON_LINKED_REL}/include" \ + JPEG_LIBS="${MASON_LINKED_REL}/lib" \ + TIFF_INCLUDES="${MASON_LINKED_REL}/include" \ + TIFF_LIBS="${MASON_LINKED_REL}/lib" \ + WEBP_INCLUDES="${MASON_LINKED_REL}/include" \ + WEBP_LIBS="${MASON_LINKED_REL}/lib" \ + PROJ_INCLUDES="${MASON_LINKED_REL}/include" \ + PROJ_LIBS="${MASON_LINKED_REL}/lib" \ + PG_INCLUDES="${MASON_LINKED_REL}/include" \ + PG_LIBS="${MASON_LINKED_REL}/lib" \ + FREETYPE_INCLUDES="${MASON_LINKED_REL}/include/freetype2" \ + FREETYPE_LIBS="${MASON_LINKED_REL}/lib" \ + SVG_RENDERER=True \ + CAIRO_INCLUDES="${MASON_LINKED_REL}/include" \ + CAIRO_LIBS="${MASON_LINKED_REL}/lib" \ + SQLITE_INCLUDES="${MASON_LINKED_REL}/include" \ + SQLITE_LIBS="${MASON_LINKED_REL}/lib" \ + GDAL_CONFIG="${MASON_LINKED_REL}/bin/gdal-config" \ + PG_CONFIG="${MASON_LINKED_REL}/bin/pg_config" \ + BENCHMARK=False \ + CPP_TESTS=False \ + PGSQL2SQLITE=True \ + SAMPLE_INPUT_PLUGINS=False \ + DEMO=False \ + XMLPARSER="ptree" \ + NO_ATEXIT=True \ + SVG2PNG=True || RESULT=$? 
+ + # if configure failed, dump out config details before exiting + if [[ ${RESULT} != 0 ]]; then + cat ${MASON_BUILD_PATH}"/config.log" + cat config.py + false # then fail + fi + + # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + JOBS=4 make + else + JOBS=${MASON_CONCURRENCY} make + fi + + make install + if [[ $(uname -s) == 'Darwin' ]]; then + install_name_tool -id @loader_path/lib/libmapnik.dylib ${MASON_PREFIX}"/lib/libmapnik.dylib"; + PLUGINDIRS=${MASON_PREFIX}"/lib/mapnik/input/*.input"; + for f in $PLUGINDIRS; do + echo $f; + echo `basename $f`; + install_name_tool -id plugins/input/`basename $f` $f; + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../../../lib/libmapnik.dylib $f; + done; + # command line tools + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-index" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-render" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/shapeindex" + fi + # fix mapnik-config entries for deps + HERE=$(pwd) + python -c "import re;data=open('$MASON_PREFIX/bin/mapnik-config','r').read();data=re.sub(r'-(isysroot)\s\/([0-9a-zA-Z_\/\-\.]+)', '', data);open('$MASON_PREFIX/bin/mapnik-config','w').write(data.replace('$HERE','.').replace('${MASON_ROOT}','./mason_packages'))" + cat $MASON_PREFIX/bin/mapnik-config +} + +function mason_cflags { + ${MASON_PREFIX}/bin/mapnik-config --cflags +} + +function mason_ldflags { + ${MASON_PREFIX}/bin/mapnik-config --ldflags +} + +function mason_static_libs { + ${MASON_PREFIX}/bin/mapnik-config --dep-libs +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/mapnik/98c26bc/script.sh b/scripts/mapnik/98c26bc/script.sh index 26585474a..280787d03 100755 --- 
a/scripts/mapnik/98c26bc/script.sh +++ b/scripts/mapnik/98c26bc/script.sh @@ -9,7 +9,7 @@ MASON_LIB_FILE=lib/libmapnik.${MASON_DYNLIB_SUFFIX} function mason_load_source { export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapnik-v${MASON_VERSION} if [[ ! -d ${MASON_BUILD_PATH} ]]; then - git clone http://github.com/mapnik/mapnik ${MASON_BUILD_PATH} + git clone https://github.com/mapnik/mapnik ${MASON_BUILD_PATH} (cd ${MASON_BUILD_PATH} && git checkout ${MASON_VERSION} && git submodule update --init) fi } diff --git a/scripts/mapnik/a0ea7db1a/.travis.yml b/scripts/mapnik/a0ea7db1a/.travis.yml new file mode 100644 index 000000000..d960a4be4 --- /dev/null +++ b/scripts/mapnik/a0ea7db1a/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode9.3 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + - xutils-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mapnik/a0ea7db1a/script.sh b/scripts/mapnik/a0ea7db1a/script.sh new file mode 100755 index 000000000..d29e42fab --- /dev/null +++ b/scripts/mapnik/a0ea7db1a/script.sh @@ -0,0 +1,179 @@ +#!/usr/bin/env bash + +MASON_NAME=mapnik +MASON_VERSION=a0ea7db1a +MASON_LIB_FILE=lib/libmapnik.${MASON_DYNLIB_SUFFIX} + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapnik-v${MASON_VERSION} + if [[ ! -d ${MASON_BUILD_PATH} ]]; then + git clone https://github.com/mapnik/mapnik ${MASON_BUILD_PATH} + (cd ${MASON_BUILD_PATH} && git checkout ${MASON_VERSION} && git submodule update --init) + fi +} + +function install() { + ${MASON_DIR}/mason install $1 $2 + MASON_PLATFORM_ID=$(${MASON_DIR}/mason env MASON_PLATFORM_ID) + if [[ ! 
-d ${MASON_ROOT}/${MASON_PLATFORM_ID}/${1}/${2} ]]; then + if [[ ${3:-false} != false ]]; then + LA_FILE=$(${MASON_DIR}/mason prefix $1 $2)/lib/$3.la + if [[ -f ${LA_FILE} ]]; then + perl -i -p -e 's:\Q$ENV{HOME}/build/mapbox/mason\E:$ENV{PWD}:g' ${LA_FILE} + else + echo "$LA_FILE not found" + fi + fi + fi + ${MASON_DIR}/mason link $1 $2 +} + +ICU_VERSION="57.1" + +function mason_prepare_compile { + install jpeg_turbo 1.5.1 libjpeg + install libpng 1.6.28 libpng + install libtiff 4.0.7 libtiff + install libpq 9.6.2 + install sqlite 3.17.0 libsqlite3 + install expat 2.2.0 libexpat + install icu ${ICU_VERSION} + install proj 4.9.3 libproj + install pixman 0.34.0 libpixman-1 + install cairo 1.14.8 libcairo + install webp 0.6.0 libwebp + install libgdal 2.1.3 libgdal + install boost 1.66.0 + install boost_libsystem 1.66.0 + install boost_libfilesystem 1.66.0 + install boost_libprogram_options 1.66.0 + install boost_libregex_icu57 1.66.0 + install freetype 2.7.1 libfreetype + install harfbuzz 1.4.2-ft libharfbuzz +} + +function mason_compile { + export PATH="${MASON_ROOT}/.link/bin:${PATH}" + MASON_LINKED_REL="${MASON_ROOT}/.link" + MASON_LINKED_ABS="${MASON_ROOT}/.link" + + # The mapnik configure check for c++14 fails when mason hardcodes c++11 in the CXXFLAGS + # So we remove it here + export CXXFLAGS="${CXXFLAGS//-std=c++11}" + + if [[ $(uname -s) == 'Linux' ]]; then + echo "CUSTOM_LDFLAGS = '${LDFLAGS} -Wl,-z,origin -Wl,-rpath=\\\$\$ORIGIN/../lib/ -Wl,-rpath=\\\$\$ORIGIN/../../'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0'" >> config.py + else + echo "CUSTOM_LDFLAGS = '${LDFLAGS}'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS}'" >> config.py + fi + + # setup `mapnik-settings.env` (like bootstrap.sh does) + # note: we don't use bootstrap.sh to be able to control + # mason versions here and use the mason we are running + echo "export PROJ_LIB=${MASON_LINKED_ABS}/share/proj" > mapnik-settings.env + echo "export 
ICU_DATA=${MASON_LINKED_ABS}/share/icu/${ICU_VERSION}" >> mapnik-settings.env + echo "export GDAL_DATA=${MASON_LINKED_ABS}/share/gdal" >> mapnik-settings.env + + RESULT=0 + + ./configure \ + CXX="${CXX}" \ + CC="${CC}" \ + PREFIX="${MASON_PREFIX}" \ + RUNTIME_LINK="static" \ + INPUT_PLUGINS="all" \ + ENABLE_SONAME=False \ + PKG_CONFIG_PATH="${MASON_LINKED_REL}/lib/pkgconfig" \ + PATH_REMOVE="/usr:/usr/local" \ + BOOST_INCLUDES="${MASON_LINKED_REL}/include" \ + BOOST_LIBS="${MASON_LINKED_REL}/lib" \ + ICU_INCLUDES="${MASON_LINKED_REL}/include" \ + ICU_LIBS="${MASON_LINKED_REL}/lib" \ + HB_INCLUDES="${MASON_LINKED_REL}/include" \ + HB_LIBS="${MASON_LINKED_REL}/lib" \ + PNG_INCLUDES="${MASON_LINKED_REL}/include/libpng16" \ + PNG_LIBS="${MASON_LINKED_REL}/lib" \ + JPEG_INCLUDES="${MASON_LINKED_REL}/include" \ + JPEG_LIBS="${MASON_LINKED_REL}/lib" \ + TIFF_INCLUDES="${MASON_LINKED_REL}/include" \ + TIFF_LIBS="${MASON_LINKED_REL}/lib" \ + WEBP_INCLUDES="${MASON_LINKED_REL}/include" \ + WEBP_LIBS="${MASON_LINKED_REL}/lib" \ + PROJ_INCLUDES="${MASON_LINKED_REL}/include" \ + PROJ_LIBS="${MASON_LINKED_REL}/lib" \ + PG_INCLUDES="${MASON_LINKED_REL}/include" \ + PG_LIBS="${MASON_LINKED_REL}/lib" \ + FREETYPE_INCLUDES="${MASON_LINKED_REL}/include/freetype2" \ + FREETYPE_LIBS="${MASON_LINKED_REL}/lib" \ + SVG_RENDERER=True \ + CAIRO_INCLUDES="${MASON_LINKED_REL}/include" \ + CAIRO_LIBS="${MASON_LINKED_REL}/lib" \ + SQLITE_INCLUDES="${MASON_LINKED_REL}/include" \ + SQLITE_LIBS="${MASON_LINKED_REL}/lib" \ + GDAL_CONFIG="${MASON_LINKED_REL}/bin/gdal-config" \ + PG_CONFIG="${MASON_LINKED_REL}/bin/pg_config" \ + BENCHMARK=False \ + CPP_TESTS=False \ + PGSQL2SQLITE=True \ + SAMPLE_INPUT_PLUGINS=False \ + DEMO=False \ + XMLPARSER="ptree" \ + NO_ATEXIT=True \ + SVG2PNG=True || RESULT=$? 
+ + # if configure failed, dump out config details before exiting + if [[ ${RESULT} != 0 ]]; then + cat ${MASON_BUILD_PATH}"/config.log" + cat config.py + false # then fail + fi + + # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + JOBS=4 make + else + JOBS=${MASON_CONCURRENCY} make + fi + + make install + if [[ $(uname -s) == 'Darwin' ]]; then + install_name_tool -id @loader_path/lib/libmapnik.dylib ${MASON_PREFIX}"/lib/libmapnik.dylib"; + PLUGINDIRS=${MASON_PREFIX}"/lib/mapnik/input/*.input"; + for f in $PLUGINDIRS; do + echo $f; + echo `basename $f`; + install_name_tool -id plugins/input/`basename $f` $f; + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../../../lib/libmapnik.dylib $f; + done; + # command line tools + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-index" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-render" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/shapeindex" + fi + # fix mapnik-config entries for deps + HERE=$(pwd) + python -c "import re;data=open('$MASON_PREFIX/bin/mapnik-config','r').read();data=re.sub(r'-(isysroot)\s\/([0-9a-zA-Z_\/\-\.]+)', '', data);open('$MASON_PREFIX/bin/mapnik-config','w').write(data.replace('$HERE','.').replace('${MASON_ROOT}','./mason_packages'))" + cat $MASON_PREFIX/bin/mapnik-config +} + +function mason_cflags { + ${MASON_PREFIX}/bin/mapnik-config --cflags +} + +function mason_ldflags { + ${MASON_PREFIX}/bin/mapnik-config --ldflags +} + +function mason_static_libs { + ${MASON_PREFIX}/bin/mapnik-config --dep-libs +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/mapnik/a2f5969/script.sh b/scripts/mapnik/a2f5969/script.sh index 98239c483..7960b42e2 100755 --- 
a/scripts/mapnik/a2f5969/script.sh +++ b/scripts/mapnik/a2f5969/script.sh @@ -9,7 +9,7 @@ MASON_LIB_FILE=lib/libmapnik.${MASON_DYNLIB_SUFFIX} function mason_load_source { export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapnik-v${MASON_VERSION} if [[ ! -d ${MASON_BUILD_PATH} ]]; then - git clone http://github.com/mapnik/mapnik ${MASON_BUILD_PATH} + git clone https://github.com/mapnik/mapnik ${MASON_BUILD_PATH} (cd ${MASON_BUILD_PATH} && git checkout ${MASON_VERSION} && git submodule update --init) fi } diff --git a/scripts/mapnik/a9d9f7ed9/.travis.yml b/scripts/mapnik/a9d9f7ed9/.travis.yml new file mode 100644 index 000000000..924a48996 --- /dev/null +++ b/scripts/mapnik/a9d9f7ed9/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11.3 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + - xutils-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mapnik/a9d9f7ed9/script.sh b/scripts/mapnik/a9d9f7ed9/script.sh new file mode 100755 index 000000000..27b6a3515 --- /dev/null +++ b/scripts/mapnik/a9d9f7ed9/script.sh @@ -0,0 +1,179 @@ +#!/usr/bin/env bash + +MASON_NAME=mapnik +MASON_VERSION=a9d9f7ed9 +MASON_LIB_FILE=lib/libmapnik.${MASON_DYNLIB_SUFFIX} + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapnik-v${MASON_VERSION} + if [[ ! -d ${MASON_BUILD_PATH} ]]; then + git clone https://github.com/mapnik/mapnik ${MASON_BUILD_PATH} + (cd ${MASON_BUILD_PATH} && git checkout ${MASON_VERSION} && git submodule update --init) + fi +} + +function install() { + ${MASON_DIR}/mason install $1 $2 + MASON_PLATFORM_ID=$(${MASON_DIR}/mason env MASON_PLATFORM_ID) + if [[ ! 
-d ${MASON_ROOT}/${MASON_PLATFORM_ID}/${1}/${2} ]]; then + if [[ ${3:-false} != false ]]; then + LA_FILE=$(${MASON_DIR}/mason prefix $1 $2)/lib/$3.la + if [[ -f ${LA_FILE} ]]; then + perl -i -p -e 's:\Q$ENV{HOME}/build/mapbox/mason\E:$ENV{PWD}:g' ${LA_FILE} + else + echo "$LA_FILE not found" + fi + fi + fi + ${MASON_DIR}/mason link $1 $2 +} + +ICU_VERSION="57.1" + +function mason_prepare_compile { + install jpeg_turbo 1.5.1 libjpeg + install libpng 1.6.28 libpng + install libtiff 4.0.7 libtiff + install libpq 9.6.2 + install sqlite 3.17.0 libsqlite3 + install expat 2.2.0 libexpat + install icu ${ICU_VERSION} + install proj 4.9.3 libproj + install pixman 0.34.0 libpixman-1 + install cairo 1.14.8 libcairo + install webp 0.6.0 libwebp + install libgdal 2.1.3 libgdal + install boost 1.66.0 + install boost_libsystem 1.66.0 + install boost_libfilesystem 1.66.0 + install boost_libprogram_options 1.66.0 + install boost_libregex_icu57 1.66.0 + install freetype 2.7.1 libfreetype + install harfbuzz 1.4.2-ft libharfbuzz +} + +function mason_compile { + export PATH="${MASON_ROOT}/.link/bin:${PATH}" + MASON_LINKED_REL="${MASON_ROOT}/.link" + MASON_LINKED_ABS="${MASON_ROOT}/.link" + + # The mapnik configure check for c++14 fails when mason hardcodes c++11 in the CXXFLAGS + # So we remove it here + export CXXFLAGS="${CXXFLAGS//-std=c++11}" + + if [[ $(uname -s) == 'Linux' ]]; then + echo "CUSTOM_LDFLAGS = '${LDFLAGS} -Wl,-z,origin -Wl,-rpath=\\\$\$ORIGIN/../lib/ -Wl,-rpath=\\\$\$ORIGIN/../../'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0'" >> config.py + else + echo "CUSTOM_LDFLAGS = '${LDFLAGS}'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS}'" >> config.py + fi + + # setup `mapnik-settings.env` (like bootstrap.sh does) + # note: we don't use bootstrap.sh to be able to control + # mason versions here and use the mason we are running + echo "export PROJ_LIB=${MASON_LINKED_ABS}/share/proj" > mapnik-settings.env + echo "export 
ICU_DATA=${MASON_LINKED_ABS}/share/icu/${ICU_VERSION}" >> mapnik-settings.env + echo "export GDAL_DATA=${MASON_LINKED_ABS}/share/gdal" >> mapnik-settings.env + + RESULT=0 + + ./configure \ + CXX="${CXX}" \ + CC="${CC}" \ + PREFIX="${MASON_PREFIX}" \ + RUNTIME_LINK="static" \ + INPUT_PLUGINS="all" \ + ENABLE_SONAME=False \ + PKG_CONFIG_PATH="${MASON_LINKED_REL}/lib/pkgconfig" \ + PATH_REMOVE="/usr:/usr/local" \ + BOOST_INCLUDES="${MASON_LINKED_REL}/include" \ + BOOST_LIBS="${MASON_LINKED_REL}/lib" \ + ICU_INCLUDES="${MASON_LINKED_REL}/include" \ + ICU_LIBS="${MASON_LINKED_REL}/lib" \ + HB_INCLUDES="${MASON_LINKED_REL}/include" \ + HB_LIBS="${MASON_LINKED_REL}/lib" \ + PNG_INCLUDES="${MASON_LINKED_REL}/include/libpng16" \ + PNG_LIBS="${MASON_LINKED_REL}/lib" \ + JPEG_INCLUDES="${MASON_LINKED_REL}/include" \ + JPEG_LIBS="${MASON_LINKED_REL}/lib" \ + TIFF_INCLUDES="${MASON_LINKED_REL}/include" \ + TIFF_LIBS="${MASON_LINKED_REL}/lib" \ + WEBP_INCLUDES="${MASON_LINKED_REL}/include" \ + WEBP_LIBS="${MASON_LINKED_REL}/lib" \ + PROJ_INCLUDES="${MASON_LINKED_REL}/include" \ + PROJ_LIBS="${MASON_LINKED_REL}/lib" \ + PG_INCLUDES="${MASON_LINKED_REL}/include" \ + PG_LIBS="${MASON_LINKED_REL}/lib" \ + FREETYPE_INCLUDES="${MASON_LINKED_REL}/include/freetype2" \ + FREETYPE_LIBS="${MASON_LINKED_REL}/lib" \ + SVG_RENDERER=True \ + CAIRO_INCLUDES="${MASON_LINKED_REL}/include" \ + CAIRO_LIBS="${MASON_LINKED_REL}/lib" \ + SQLITE_INCLUDES="${MASON_LINKED_REL}/include" \ + SQLITE_LIBS="${MASON_LINKED_REL}/lib" \ + GDAL_CONFIG="${MASON_LINKED_REL}/bin/gdal-config" \ + PG_CONFIG="${MASON_LINKED_REL}/bin/pg_config" \ + BENCHMARK=False \ + CPP_TESTS=False \ + PGSQL2SQLITE=True \ + SAMPLE_INPUT_PLUGINS=False \ + DEMO=False \ + XMLPARSER="ptree" \ + NO_ATEXIT=True \ + SVG2PNG=True || RESULT=$? 
+ + # if configure failed, dump out config details before exiting + if [[ ${RESULT} != 0 ]]; then + cat ${MASON_BUILD_PATH}"/config.log" + cat config.py + false # then fail + fi + + # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + JOBS=4 make + else + JOBS=${MASON_CONCURRENCY} make + fi + + make install + if [[ $(uname -s) == 'Darwin' ]]; then + install_name_tool -id @loader_path/lib/libmapnik.dylib ${MASON_PREFIX}"/lib/libmapnik.dylib"; + PLUGINDIRS=${MASON_PREFIX}"/lib/mapnik/input/*.input"; + for f in $PLUGINDIRS; do + echo $f; + echo `basename $f`; + install_name_tool -id plugins/input/`basename $f` $f; + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../../../lib/libmapnik.dylib $f; + done; + # command line tools + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-index" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-render" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/shapeindex" + fi + # fix mapnik-config entries for deps + HERE=$(pwd) + python -c "import re;data=open('$MASON_PREFIX/bin/mapnik-config','r').read();data=re.sub(r'-(isysroot)\s\/([0-9a-zA-Z_\/\-\.]+)', '', data);open('$MASON_PREFIX/bin/mapnik-config','w').write(data.replace('$HERE','.').replace('${MASON_ROOT}','./mason_packages'))" + cat $MASON_PREFIX/bin/mapnik-config +} + +function mason_cflags { + ${MASON_PREFIX}/bin/mapnik-config --cflags +} + +function mason_ldflags { + ${MASON_PREFIX}/bin/mapnik-config --ldflags +} + +function mason_static_libs { + ${MASON_PREFIX}/bin/mapnik-config --dep-libs +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/mapnik/c3eda40e0/.travis.yml b/scripts/mapnik/c3eda40e0/.travis.yml new file mode 100644 index 
000000000..cb7c20234 --- /dev/null +++ b/scripts/mapnik/c3eda40e0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11.3 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + - xutils-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mapnik/c3eda40e0/script.sh b/scripts/mapnik/c3eda40e0/script.sh new file mode 100755 index 000000000..8e3d8f0da --- /dev/null +++ b/scripts/mapnik/c3eda40e0/script.sh @@ -0,0 +1,180 @@ +#!/usr/bin/env bash + +MASON_NAME=mapnik +MASON_VERSION=c3eda40e0 +MASON_LIB_FILE=lib/libmapnik.${MASON_DYNLIB_SUFFIX} + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapnik-v${MASON_VERSION} + if [[ ! -d ${MASON_BUILD_PATH} ]]; then + git clone https://github.com/mapnik/mapnik ${MASON_BUILD_PATH} + (cd ${MASON_BUILD_PATH} && git checkout ${MASON_VERSION} && git submodule update --init) + fi +} + +function install() { + ${MASON_DIR}/mason install $1 $2 + MASON_PLATFORM_ID=$(${MASON_DIR}/mason env MASON_PLATFORM_ID) + if [[ ! 
-d ${MASON_ROOT}/${MASON_PLATFORM_ID}/${1}/${2} ]]; then + if [[ ${3:-false} != false ]]; then + LA_FILE=$(${MASON_DIR}/mason prefix $1 $2)/lib/$3.la + if [[ -f ${LA_FILE} ]]; then + perl -i -p -e 's:\Q$ENV{HOME}/build/mapbox/mason\E:$ENV{PWD}:g' ${LA_FILE} + else + echo "$LA_FILE not found" + fi + fi + fi + ${MASON_DIR}/mason link $1 $2 +} + +ICU_VERSION="58.1" +BOOST_VERSION="1.75.0" + +function mason_prepare_compile { + install jpeg_turbo 1.5.1 libjpeg + install libpng 1.6.28 libpng + install libtiff 4.0.7 libtiff + install libpq 9.6.2 + install sqlite 3.17.0 libsqlite3 + install expat 2.2.0 libexpat + install icu ${ICU_VERSION} + install proj 4.9.3 libproj + install pixman 0.34.0 libpixman-1 + install cairo 1.14.8 libcairo + install webp 0.6.0 libwebp + install libgdal 2.1.3 libgdal + install boost ${BOOST_VERSION} + install boost_libsystem ${BOOST_VERSION} + install boost_libfilesystem ${BOOST_VERSION} + install boost_libprogram_options ${BOOST_VERSION} + install boost_libregex_icu58 ${BOOST_VERSION} + install freetype 2.7.1 libfreetype + install harfbuzz 1.4.2-ft libharfbuzz +} + +function mason_compile { + export PATH="${MASON_ROOT}/.link/bin:${PATH}" + MASON_LINKED_REL="${MASON_ROOT}/.link" + MASON_LINKED_ABS="${MASON_ROOT}/.link" + + # The mapnik configure check for c++14 fails when mason hardcodes c++11 in the CXXFLAGS + # So we remove it here + export CXXFLAGS="${CXXFLAGS//-std=c++11}" + + if [[ $(uname -s) == 'Linux' ]]; then + echo "CUSTOM_LDFLAGS = '${LDFLAGS} -Wl,-z,origin -Wl,-rpath=\\\$\$ORIGIN/../lib/ -Wl,-rpath=\\\$\$ORIGIN/../../'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0'" >> config.py + else + echo "CUSTOM_LDFLAGS = '${LDFLAGS}'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS}'" >> config.py + fi + + # setup `mapnik-settings.env` (like bootstrap.sh does) + # note: we don't use bootstrap.sh to be able to control + # mason versions here and use the mason we are running + echo "export 
PROJ_LIB=${MASON_LINKED_ABS}/share/proj" > mapnik-settings.env + echo "export ICU_DATA=${MASON_LINKED_ABS}/share/icu/${ICU_VERSION}" >> mapnik-settings.env + echo "export GDAL_DATA=${MASON_LINKED_ABS}/share/gdal" >> mapnik-settings.env + + RESULT=0 + + ./configure \ + CXX="${CXX}" \ + CC="${CC}" \ + PREFIX="${MASON_PREFIX}" \ + RUNTIME_LINK="static" \ + INPUT_PLUGINS="all" \ + ENABLE_SONAME=False \ + PKG_CONFIG_PATH="${MASON_LINKED_REL}/lib/pkgconfig" \ + PATH_REMOVE="/usr:/usr/local" \ + BOOST_INCLUDES="${MASON_LINKED_REL}/include" \ + BOOST_LIBS="${MASON_LINKED_REL}/lib" \ + ICU_INCLUDES="${MASON_LINKED_REL}/include" \ + ICU_LIBS="${MASON_LINKED_REL}/lib" \ + HB_INCLUDES="${MASON_LINKED_REL}/include" \ + HB_LIBS="${MASON_LINKED_REL}/lib" \ + PNG_INCLUDES="${MASON_LINKED_REL}/include/libpng16" \ + PNG_LIBS="${MASON_LINKED_REL}/lib" \ + JPEG_INCLUDES="${MASON_LINKED_REL}/include" \ + JPEG_LIBS="${MASON_LINKED_REL}/lib" \ + TIFF_INCLUDES="${MASON_LINKED_REL}/include" \ + TIFF_LIBS="${MASON_LINKED_REL}/lib" \ + WEBP_INCLUDES="${MASON_LINKED_REL}/include" \ + WEBP_LIBS="${MASON_LINKED_REL}/lib" \ + PROJ_INCLUDES="${MASON_LINKED_REL}/include" \ + PROJ_LIBS="${MASON_LINKED_REL}/lib" \ + PG_INCLUDES="${MASON_LINKED_REL}/include" \ + PG_LIBS="${MASON_LINKED_REL}/lib" \ + FREETYPE_INCLUDES="${MASON_LINKED_REL}/include/freetype2" \ + FREETYPE_LIBS="${MASON_LINKED_REL}/lib" \ + SVG_RENDERER=True \ + CAIRO_INCLUDES="${MASON_LINKED_REL}/include" \ + CAIRO_LIBS="${MASON_LINKED_REL}/lib" \ + SQLITE_INCLUDES="${MASON_LINKED_REL}/include" \ + SQLITE_LIBS="${MASON_LINKED_REL}/lib" \ + GDAL_CONFIG="${MASON_LINKED_REL}/bin/gdal-config" \ + PG_CONFIG="${MASON_LINKED_REL}/bin/pg_config" \ + BENCHMARK=False \ + CPP_TESTS=False \ + PGSQL2SQLITE=True \ + SAMPLE_INPUT_PLUGINS=False \ + DEMO=False \ + XMLPARSER="ptree" \ + NO_ATEXIT=True \ + SVG2PNG=True || RESULT=$? 
+ + # if configure failed, dump out config details before exiting + if [[ ${RESULT} != 0 ]]; then + cat ${MASON_BUILD_PATH}"/config.log" + cat config.py + false # then fail + fi + + # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + JOBS=4 make + else + JOBS=${MASON_CONCURRENCY} make + fi + + make install + if [[ $(uname -s) == 'Darwin' ]]; then + install_name_tool -id @loader_path/lib/libmapnik.dylib ${MASON_PREFIX}"/lib/libmapnik.dylib"; + PLUGINDIRS=${MASON_PREFIX}"/lib/mapnik/input/*.input"; + for f in $PLUGINDIRS; do + echo $f; + echo `basename $f`; + install_name_tool -id plugins/input/`basename $f` $f; + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../../../lib/libmapnik.dylib $f; + done; + # command line tools + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-index" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-render" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/shapeindex" + fi + # fix mapnik-config entries for deps + HERE=$(pwd) + python -c "import re;data=open('$MASON_PREFIX/bin/mapnik-config','r').read();data=re.sub(r'-(isysroot)\s\/([0-9a-zA-Z_\/\-\.]+)', '', data);open('$MASON_PREFIX/bin/mapnik-config','w').write(data.replace('$HERE','.').replace('${MASON_ROOT}','./mason_packages'))" + cat $MASON_PREFIX/bin/mapnik-config +} + +function mason_cflags { + ${MASON_PREFIX}/bin/mapnik-config --cflags +} + +function mason_ldflags { + ${MASON_PREFIX}/bin/mapnik-config --ldflags +} + +function mason_static_libs { + ${MASON_PREFIX}/bin/mapnik-config --dep-libs +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/mapnik/c6fc956a7/.travis.yml b/scripts/mapnik/c6fc956a7/.travis.yml new file mode 100644 index 
000000000..924a48996 --- /dev/null +++ b/scripts/mapnik/c6fc956a7/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11.3 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + - xutils-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mapnik/c6fc956a7/script.sh b/scripts/mapnik/c6fc956a7/script.sh new file mode 100755 index 000000000..5d3418a99 --- /dev/null +++ b/scripts/mapnik/c6fc956a7/script.sh @@ -0,0 +1,179 @@ +#!/usr/bin/env bash + +MASON_NAME=mapnik +MASON_VERSION=c6fc956a7 +MASON_LIB_FILE=lib/libmapnik.${MASON_DYNLIB_SUFFIX} + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapnik-v${MASON_VERSION} + if [[ ! -d ${MASON_BUILD_PATH} ]]; then + git clone https://github.com/mapnik/mapnik ${MASON_BUILD_PATH} + (cd ${MASON_BUILD_PATH} && git checkout ${MASON_VERSION} && git submodule update --init) + fi +} + +function install() { + ${MASON_DIR}/mason install $1 $2 + MASON_PLATFORM_ID=$(${MASON_DIR}/mason env MASON_PLATFORM_ID) + if [[ ! 
-d ${MASON_ROOT}/${MASON_PLATFORM_ID}/${1}/${2} ]]; then + if [[ ${3:-false} != false ]]; then + LA_FILE=$(${MASON_DIR}/mason prefix $1 $2)/lib/$3.la + if [[ -f ${LA_FILE} ]]; then + perl -i -p -e 's:\Q$ENV{HOME}/build/mapbox/mason\E:$ENV{PWD}:g' ${LA_FILE} + else + echo "$LA_FILE not found" + fi + fi + fi + ${MASON_DIR}/mason link $1 $2 +} + +ICU_VERSION="57.1" + +function mason_prepare_compile { + install jpeg_turbo 1.5.1 libjpeg + install libpng 1.6.28 libpng + install libtiff 4.0.7 libtiff + install libpq 9.6.2 + install sqlite 3.17.0 libsqlite3 + install expat 2.2.0 libexpat + install icu ${ICU_VERSION} + install proj 4.9.3 libproj + install pixman 0.34.0 libpixman-1 + install cairo 1.14.8 libcairo + install webp 0.6.0 libwebp + install libgdal 2.1.3 libgdal + install boost 1.74.0 + install boost_libsystem 1.74.0 + install boost_libfilesystem 1.74.0 + install boost_libprogram_options 1.74.0 + install boost_libregex_icu57 1.74.0 + install freetype 2.7.1 libfreetype + install harfbuzz 1.4.2-ft libharfbuzz +} + +function mason_compile { + export PATH="${MASON_ROOT}/.link/bin:${PATH}" + MASON_LINKED_REL="${MASON_ROOT}/.link" + MASON_LINKED_ABS="${MASON_ROOT}/.link" + + # The mapnik configure check for c++14 fails when mason hardcodes c++11 in the CXXFLAGS + # So we remove it here + export CXXFLAGS="${CXXFLAGS//-std=c++11}" + + if [[ $(uname -s) == 'Linux' ]]; then + echo "CUSTOM_LDFLAGS = '${LDFLAGS} -Wl,-z,origin -Wl,-rpath=\\\$\$ORIGIN/../lib/ -Wl,-rpath=\\\$\$ORIGIN/../../'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0'" >> config.py + else + echo "CUSTOM_LDFLAGS = '${LDFLAGS}'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS}'" >> config.py + fi + + # setup `mapnik-settings.env` (like bootstrap.sh does) + # note: we don't use bootstrap.sh to be able to control + # mason versions here and use the mason we are running + echo "export PROJ_LIB=${MASON_LINKED_ABS}/share/proj" > mapnik-settings.env + echo "export 
ICU_DATA=${MASON_LINKED_ABS}/share/icu/${ICU_VERSION}" >> mapnik-settings.env + echo "export GDAL_DATA=${MASON_LINKED_ABS}/share/gdal" >> mapnik-settings.env + + RESULT=0 + + ./configure \ + CXX="${CXX}" \ + CC="${CC}" \ + PREFIX="${MASON_PREFIX}" \ + RUNTIME_LINK="static" \ + INPUT_PLUGINS="all" \ + ENABLE_SONAME=False \ + PKG_CONFIG_PATH="${MASON_LINKED_REL}/lib/pkgconfig" \ + PATH_REMOVE="/usr:/usr/local" \ + BOOST_INCLUDES="${MASON_LINKED_REL}/include" \ + BOOST_LIBS="${MASON_LINKED_REL}/lib" \ + ICU_INCLUDES="${MASON_LINKED_REL}/include" \ + ICU_LIBS="${MASON_LINKED_REL}/lib" \ + HB_INCLUDES="${MASON_LINKED_REL}/include" \ + HB_LIBS="${MASON_LINKED_REL}/lib" \ + PNG_INCLUDES="${MASON_LINKED_REL}/include/libpng16" \ + PNG_LIBS="${MASON_LINKED_REL}/lib" \ + JPEG_INCLUDES="${MASON_LINKED_REL}/include" \ + JPEG_LIBS="${MASON_LINKED_REL}/lib" \ + TIFF_INCLUDES="${MASON_LINKED_REL}/include" \ + TIFF_LIBS="${MASON_LINKED_REL}/lib" \ + WEBP_INCLUDES="${MASON_LINKED_REL}/include" \ + WEBP_LIBS="${MASON_LINKED_REL}/lib" \ + PROJ_INCLUDES="${MASON_LINKED_REL}/include" \ + PROJ_LIBS="${MASON_LINKED_REL}/lib" \ + PG_INCLUDES="${MASON_LINKED_REL}/include" \ + PG_LIBS="${MASON_LINKED_REL}/lib" \ + FREETYPE_INCLUDES="${MASON_LINKED_REL}/include/freetype2" \ + FREETYPE_LIBS="${MASON_LINKED_REL}/lib" \ + SVG_RENDERER=True \ + CAIRO_INCLUDES="${MASON_LINKED_REL}/include" \ + CAIRO_LIBS="${MASON_LINKED_REL}/lib" \ + SQLITE_INCLUDES="${MASON_LINKED_REL}/include" \ + SQLITE_LIBS="${MASON_LINKED_REL}/lib" \ + GDAL_CONFIG="${MASON_LINKED_REL}/bin/gdal-config" \ + PG_CONFIG="${MASON_LINKED_REL}/bin/pg_config" \ + BENCHMARK=False \ + CPP_TESTS=False \ + PGSQL2SQLITE=True \ + SAMPLE_INPUT_PLUGINS=False \ + DEMO=False \ + XMLPARSER="ptree" \ + NO_ATEXIT=True \ + SVG2PNG=True || RESULT=$? 
+ + # if configure failed, dump out config details before exiting + if [[ ${RESULT} != 0 ]]; then + cat ${MASON_BUILD_PATH}"/config.log" + cat config.py + false # then fail + fi + + # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + JOBS=4 make + else + JOBS=${MASON_CONCURRENCY} make + fi + + make install + if [[ $(uname -s) == 'Darwin' ]]; then + install_name_tool -id @loader_path/lib/libmapnik.dylib ${MASON_PREFIX}"/lib/libmapnik.dylib"; + PLUGINDIRS=${MASON_PREFIX}"/lib/mapnik/input/*.input"; + for f in $PLUGINDIRS; do + echo $f; + echo `basename $f`; + install_name_tool -id plugins/input/`basename $f` $f; + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../../../lib/libmapnik.dylib $f; + done; + # command line tools + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-index" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-render" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/shapeindex" + fi + # fix mapnik-config entries for deps + HERE=$(pwd) + python -c "import re;data=open('$MASON_PREFIX/bin/mapnik-config','r').read();data=re.sub(r'-(isysroot)\s\/([0-9a-zA-Z_\/\-\.]+)', '', data);open('$MASON_PREFIX/bin/mapnik-config','w').write(data.replace('$HERE','.').replace('${MASON_ROOT}','./mason_packages'))" + cat $MASON_PREFIX/bin/mapnik-config +} + +function mason_cflags { + ${MASON_PREFIX}/bin/mapnik-config --cflags +} + +function mason_ldflags { + ${MASON_PREFIX}/bin/mapnik-config --ldflags +} + +function mason_static_libs { + ${MASON_PREFIX}/bin/mapnik-config --dep-libs +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/mapnik/da69fdf66/.travis.yml b/scripts/mapnik/da69fdf66/.travis.yml new file mode 100644 index 
000000000..d960a4be4 --- /dev/null +++ b/scripts/mapnik/da69fdf66/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode9.3 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + - xutils-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mapnik/da69fdf66/script.sh b/scripts/mapnik/da69fdf66/script.sh new file mode 100755 index 000000000..99b0549ec --- /dev/null +++ b/scripts/mapnik/da69fdf66/script.sh @@ -0,0 +1,179 @@ +#!/usr/bin/env bash + +MASON_NAME=mapnik +MASON_VERSION=da69fdf66 +MASON_LIB_FILE=lib/libmapnik.${MASON_DYNLIB_SUFFIX} + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapnik-v${MASON_VERSION} + if [[ ! -d ${MASON_BUILD_PATH} ]]; then + git clone https://github.com/mapnik/mapnik ${MASON_BUILD_PATH} + (cd ${MASON_BUILD_PATH} && git checkout ${MASON_VERSION} && git submodule update --init) + fi +} + +function install() { + ${MASON_DIR}/mason install $1 $2 + MASON_PLATFORM_ID=$(${MASON_DIR}/mason env MASON_PLATFORM_ID) + if [[ ! 
-d ${MASON_ROOT}/${MASON_PLATFORM_ID}/${1}/${2} ]]; then + if [[ ${3:-false} != false ]]; then + LA_FILE=$(${MASON_DIR}/mason prefix $1 $2)/lib/$3.la + if [[ -f ${LA_FILE} ]]; then + perl -i -p -e 's:\Q$ENV{HOME}/build/mapbox/mason\E:$ENV{PWD}:g' ${LA_FILE} + else + echo "$LA_FILE not found" + fi + fi + fi + ${MASON_DIR}/mason link $1 $2 +} + +ICU_VERSION="57.1" + +function mason_prepare_compile { + install jpeg_turbo 1.5.1 libjpeg + install libpng 1.6.28 libpng + install libtiff 4.0.7 libtiff + install libpq 9.6.2 + install sqlite 3.17.0 libsqlite3 + install expat 2.2.0 libexpat + install icu ${ICU_VERSION} + install proj 4.9.3 libproj + install pixman 0.34.0 libpixman-1 + install cairo 1.14.8 libcairo + install webp 0.6.0 libwebp + install libgdal 2.1.3 libgdal + install boost 1.66.0 + install boost_libsystem 1.66.0 + install boost_libfilesystem 1.66.0 + install boost_libprogram_options 1.66.0 + install boost_libregex_icu57 1.66.0 + install freetype 2.7.1 libfreetype + install harfbuzz 1.4.2-ft libharfbuzz +} + +function mason_compile { + export PATH="${MASON_ROOT}/.link/bin:${PATH}" + MASON_LINKED_REL="${MASON_ROOT}/.link" + MASON_LINKED_ABS="${MASON_ROOT}/.link" + + # The mapnik configure check for c++14 fails when mason hardcodes c++11 in the CXXFLAGS + # So we remove it here + export CXXFLAGS="${CXXFLAGS//-std=c++11}" + + if [[ $(uname -s) == 'Linux' ]]; then + echo "CUSTOM_LDFLAGS = '${LDFLAGS} -Wl,-z,origin -Wl,-rpath=\\\$\$ORIGIN/../lib/ -Wl,-rpath=\\\$\$ORIGIN/../../'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0'" >> config.py + else + echo "CUSTOM_LDFLAGS = '${LDFLAGS}'" > config.py + echo "CUSTOM_CXXFLAGS = '${CXXFLAGS}'" >> config.py + fi + + # setup `mapnik-settings.env` (like bootstrap.sh does) + # note: we don't use bootstrap.sh to be able to control + # mason versions here and use the mason we are running + echo "export PROJ_LIB=${MASON_LINKED_ABS}/share/proj" > mapnik-settings.env + echo "export 
ICU_DATA=${MASON_LINKED_ABS}/share/icu/${ICU_VERSION}" >> mapnik-settings.env + echo "export GDAL_DATA=${MASON_LINKED_ABS}/share/gdal" >> mapnik-settings.env + + RESULT=0 + + ./configure \ + CXX="${CXX}" \ + CC="${CC}" \ + PREFIX="${MASON_PREFIX}" \ + RUNTIME_LINK="static" \ + INPUT_PLUGINS="all" \ + ENABLE_SONAME=False \ + PKG_CONFIG_PATH="${MASON_LINKED_REL}/lib/pkgconfig" \ + PATH_REMOVE="/usr:/usr/local" \ + BOOST_INCLUDES="${MASON_LINKED_REL}/include" \ + BOOST_LIBS="${MASON_LINKED_REL}/lib" \ + ICU_INCLUDES="${MASON_LINKED_REL}/include" \ + ICU_LIBS="${MASON_LINKED_REL}/lib" \ + HB_INCLUDES="${MASON_LINKED_REL}/include" \ + HB_LIBS="${MASON_LINKED_REL}/lib" \ + PNG_INCLUDES="${MASON_LINKED_REL}/include/libpng16" \ + PNG_LIBS="${MASON_LINKED_REL}/lib" \ + JPEG_INCLUDES="${MASON_LINKED_REL}/include" \ + JPEG_LIBS="${MASON_LINKED_REL}/lib" \ + TIFF_INCLUDES="${MASON_LINKED_REL}/include" \ + TIFF_LIBS="${MASON_LINKED_REL}/lib" \ + WEBP_INCLUDES="${MASON_LINKED_REL}/include" \ + WEBP_LIBS="${MASON_LINKED_REL}/lib" \ + PROJ_INCLUDES="${MASON_LINKED_REL}/include" \ + PROJ_LIBS="${MASON_LINKED_REL}/lib" \ + PG_INCLUDES="${MASON_LINKED_REL}/include" \ + PG_LIBS="${MASON_LINKED_REL}/lib" \ + FREETYPE_INCLUDES="${MASON_LINKED_REL}/include/freetype2" \ + FREETYPE_LIBS="${MASON_LINKED_REL}/lib" \ + SVG_RENDERER=True \ + CAIRO_INCLUDES="${MASON_LINKED_REL}/include" \ + CAIRO_LIBS="${MASON_LINKED_REL}/lib" \ + SQLITE_INCLUDES="${MASON_LINKED_REL}/include" \ + SQLITE_LIBS="${MASON_LINKED_REL}/lib" \ + GDAL_CONFIG="${MASON_LINKED_REL}/bin/gdal-config" \ + PG_CONFIG="${MASON_LINKED_REL}/bin/pg_config" \ + BENCHMARK=False \ + CPP_TESTS=False \ + PGSQL2SQLITE=True \ + SAMPLE_INPUT_PLUGINS=False \ + DEMO=False \ + XMLPARSER="ptree" \ + NO_ATEXIT=True \ + SVG2PNG=True || RESULT=$? 
+ + # if configure failed, dump out config details before exiting + if [[ ${RESULT} != 0 ]]; then + cat ${MASON_BUILD_PATH}"/config.log" + cat config.py + false # then fail + fi + + # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + JOBS=4 make + else + JOBS=${MASON_CONCURRENCY} make + fi + + make install + if [[ $(uname -s) == 'Darwin' ]]; then + install_name_tool -id @loader_path/lib/libmapnik.dylib ${MASON_PREFIX}"/lib/libmapnik.dylib"; + PLUGINDIRS=${MASON_PREFIX}"/lib/mapnik/input/*.input"; + for f in $PLUGINDIRS; do + echo $f; + echo `basename $f`; + install_name_tool -id plugins/input/`basename $f` $f; + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../../../lib/libmapnik.dylib $f; + done; + # command line tools + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-index" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/mapnik-render" + install_name_tool -change ${MASON_PREFIX}"/lib/libmapnik.dylib" @loader_path/../lib/libmapnik.dylib ${MASON_PREFIX}"/bin/shapeindex" + fi + # fix mapnik-config entries for deps + HERE=$(pwd) + python -c "import re;data=open('$MASON_PREFIX/bin/mapnik-config','r').read();data=re.sub(r'-(isysroot)\s\/([0-9a-zA-Z_\/\-\.]+)', '', data);open('$MASON_PREFIX/bin/mapnik-config','w').write(data.replace('$HERE','.').replace('${MASON_ROOT}','./mason_packages'))" + cat $MASON_PREFIX/bin/mapnik-config +} + +function mason_cflags { + ${MASON_PREFIX}/bin/mapnik-config --cflags +} + +function mason_ldflags { + ${MASON_PREFIX}/bin/mapnik-config --ldflags +} + +function mason_static_libs { + ${MASON_PREFIX}/bin/mapnik-config --dep-libs +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/mapnik/df0bbe4/script.sh b/scripts/mapnik/df0bbe4/script.sh index d0df93902..24c4f612a 100755 --- 
a/scripts/mapnik/df0bbe4/script.sh +++ b/scripts/mapnik/df0bbe4/script.sh @@ -9,7 +9,7 @@ MASON_LIB_FILE=lib/libmapnik.${MASON_DYNLIB_SUFFIX} function mason_load_source { export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapnik-v${MASON_VERSION} if [[ ! -d ${MASON_BUILD_PATH} ]]; then - git clone http://github.com/mapnik/mapnik ${MASON_BUILD_PATH} + git clone https://github.com/mapnik/mapnik ${MASON_BUILD_PATH} (cd ${MASON_BUILD_PATH} && git checkout ${MASON_VERSION} && git submodule update --init) fi } diff --git a/scripts/mapnik/f02a25901/script.sh b/scripts/mapnik/f02a25901/script.sh index 443fed16c..69bd52fd8 100755 --- a/scripts/mapnik/f02a25901/script.sh +++ b/scripts/mapnik/f02a25901/script.sh @@ -9,7 +9,7 @@ MASON_LIB_FILE=lib/libmapnik.${MASON_DYNLIB_SUFFIX} function mason_load_source { export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapnik-v${MASON_VERSION} if [[ ! -d ${MASON_BUILD_PATH} ]]; then - git clone http://github.com/mapnik/mapnik ${MASON_BUILD_PATH} + git clone https://github.com/mapnik/mapnik ${MASON_BUILD_PATH} (cd ${MASON_BUILD_PATH} && git checkout ${MASON_VERSION} && git submodule update --init) fi } diff --git a/scripts/mbgl-core/1.6.0-cxx11abi/.travis.yml b/scripts/mbgl-core/1.6.0-cxx11abi/.travis.yml new file mode 100644 index 000000000..8509ace93 --- /dev/null +++ b/scripts/mbgl-core/1.6.0-cxx11abi/.travis.yml @@ -0,0 +1,18 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode11 + - os: linux + dist: bionic + compiler: clang + +before_script: + - if [[ ${MASON_PLATFORM} == "linux" && ${CXX} == "clang++" ]]; then export CXX="clang++-6" CC="clang-6" ; fi + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mbgl-core/1.6.0-cxx11abi/script.sh b/scripts/mbgl-core/1.6.0-cxx11abi/script.sh new file mode 100755 index 000000000..4828a0fc8 --- /dev/null +++ b/scripts/mbgl-core/1.6.0-cxx11abi/script.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash + 
+LIB_VERSION=1.6.0 + +MASON_NAME=mbgl-core +MASON_VERSION=${LIB_VERSION}-cxx11abi +# used to target future release +SHA=bf4c734 +MASON_LIB_FILE=lib/libmbgl-core.a + +. ${MASON_DIR}/mason.sh + + +function mason_load_source { + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapbox-gl-native-maps-v${LIB_VERSION} + if [[ ! -d ${MASON_BUILD_PATH} ]]; then + git clone https://github.com/mapbox/mapbox-gl-native ${MASON_BUILD_PATH} + fi + (cd ${MASON_BUILD_PATH} && git fetch -v && git checkout ${SHA} && git submodule update --init --recursive) +} + +function mason_prepare_compile { + NINJA_VERSION=1.9.0 + ${MASON_DIR}/mason install ninja ${NINJA_VERSION} + MASON_NINJA=$(${MASON_DIR}/mason prefix ninja ${NINJA_VERSION}) +} + +function mason_compile { + mkdir -p build + cd build + cmake ../ \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} -DCMAKE_BUILD_TYPE=Release \ + -DMBGL_WITH_CORE_ONLY=ON \ + -DMBGL_WITH_OPENGL=OFF \ + -DMBGL_WITH_WERROR=OFF \ + -G Ninja -DCMAKE_MAKE_PROGRAM=${MASON_NINJA}/bin/ninja \ + -DCMAKE_CXX_COMPILER="$CXX" \ + -DCMAKE_C_COMPILER="$CC" + ${MASON_NINJA}/bin/ninja mbgl-core -j4 + echo "making directories at ${MASON_PREFIX}/" + mkdir -p ${MASON_PREFIX}/include + mkdir -p ${MASON_PREFIX}/lib + echo "copying libraries to ${MASON_PREFIX}/lib/" + cp *.a ${MASON_PREFIX}/lib/ + echo "copying source files to ${MASON_PREFIX}/" + cp -r ../include ${MASON_PREFIX}/ + cp -r ../platform ${MASON_PREFIX}/ + cp -r ../src ${MASON_PREFIX}/ + cp -r ../vendor ${MASON_PREFIX}/include/mbgl/ +} + +function mason_cflags { + : +} + +function mason_static_libs { + : +} + +function mason_ldflags { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/mbgl-core/1.6.0/.travis.yml b/scripts/mbgl-core/1.6.0/.travis.yml new file mode 100644 index 000000000..2afd469e8 --- /dev/null +++ b/scripts/mbgl-core/1.6.0/.travis.yml @@ -0,0 +1,17 @@ +language: generic + +addons: + apt: + sources: [ 'ubuntu-toolchain-r-test' ] + packages: [ 'libstdc++-5-dev' ] + 
+matrix: + include: + - os: osx + osx_image: xcode11 + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mbgl-core/1.6.0/script.sh b/scripts/mbgl-core/1.6.0/script.sh new file mode 100755 index 000000000..2b6e17dd9 --- /dev/null +++ b/scripts/mbgl-core/1.6.0/script.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash + +MASON_NAME=mbgl-core +MASON_VERSION=1.6.0 +# used to target future release +SHA=bf4c734 +MASON_LIB_FILE=lib/libmbgl-core.a + +. ${MASON_DIR}/mason.sh + + +function mason_load_source { + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapbox-gl-native-maps-v${MASON_VERSION} + if [[ ! -d ${MASON_BUILD_PATH} ]]; then + git clone https://github.com/mapbox/mapbox-gl-native ${MASON_BUILD_PATH} + fi + (cd ${MASON_BUILD_PATH} && git fetch -v && git checkout ${SHA} && git submodule update --init --recursive) +} + +function mason_prepare_compile { + CCACHE_VERSION=3.7.2 + CMAKE_VERSION=3.15.2 + NINJA_VERSION=1.9.0 + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION}) + ${MASON_DIR}/mason install cmake ${CMAKE_VERSION} + MASON_CMAKE=$(${MASON_DIR}/mason prefix cmake ${CMAKE_VERSION}) + ${MASON_DIR}/mason install ninja ${NINJA_VERSION} + MASON_NINJA=$(${MASON_DIR}/mason prefix ninja ${NINJA_VERSION}) +} + +function mason_compile { + mkdir -p build + cd build + ${MASON_CMAKE}/bin/cmake ../ \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} -DCMAKE_BUILD_TYPE=Release \ + -DMBGL_WITH_CORE_ONLY=ON \ + -DMBGL_WITH_OPENGL=OFF \ + -DMBGL_WITH_WERROR=OFF \ + -G Ninja -DCMAKE_MAKE_PROGRAM=${MASON_NINJA}/bin/ninja \ + -DCMAKE_CXX_COMPILER_LAUNCHER=${MASON_CCACHE}/bin/ccache \ + -DCMAKE_CXX_COMPILER="$CXX" \ + -DCMAKE_C_COMPILER="$CC" + ${MASON_NINJA}/bin/ninja mbgl-core -j4 + echo "making directories at ${MASON_PREFIX}/" + mkdir -p ${MASON_PREFIX}/include + mkdir -p ${MASON_PREFIX}/lib + echo "copying libraries to 
${MASON_PREFIX}/lib/" + cp *.a ${MASON_PREFIX}/lib/ + echo "copying source files to ${MASON_PREFIX}/" + cp -r ../include ${MASON_PREFIX}/ + cp -r ../platform ${MASON_PREFIX}/ + cp -r ../src ${MASON_PREFIX}/ + cp -r ../vendor ${MASON_PREFIX}/include/mbgl/ +} + +function mason_cflags { + : +} + +function mason_static_libs { + : +} + +function mason_ldflags { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/mbgl-core/20f880e-asan/.travis.yml b/scripts/mbgl-core/20f880e-asan/.travis.yml new file mode 100644 index 000000000..1ea1afc14 --- /dev/null +++ b/scripts/mbgl-core/20f880e-asan/.travis.yml @@ -0,0 +1,26 @@ +language: generic + +addons: + apt: + sources: [ 'ubuntu-toolchain-r-test' ] + packages: [ 'libstdc++-5-dev', + 'libxi-dev', + 'libglu1-mesa-dev', + 'x11proto-randr-dev', + 'x11proto-xext-dev', + 'libxrandr-dev', + 'x11proto-xf86vidmode-dev', + 'libxxf86vm-dev', + 'libxcursor-dev', + 'libxinerama-dev' ] + +matrix: + include: + - os: osx + osx_image: xcode9.2 + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mbgl-core/20f880e-asan/script.sh b/scripts/mbgl-core/20f880e-asan/script.sh new file mode 100755 index 000000000..81a93297b --- /dev/null +++ b/scripts/mbgl-core/20f880e-asan/script.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash + +MASON_NAME=mbgl-core +MASON_VERSION=20f880e-asan +MASON_LIB_FILE=lib/libmbgl-core.a + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/mapbox-gl-native/tarball/20f880e \ + 6a90311c6f6edf36f4b1d85efabda78ade427765 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapbox-mapbox-gl-native-20f880e +} + +function mason_prepare_compile { + CCACHE_VERSION=3.3.4 + CMAKE_VERSION=3.8.2 + NINJA_VERSION=1.7.2 + LLVM_VERSION=6.0.1 + ${MASON_DIR}/mason install clang++ ${LLVM_VERSION} + MASON_LLVM=$(${MASON_DIR}/mason prefix clang++ ${LLVM_VERSION}) + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION}) + ${MASON_DIR}/mason install cmake ${CMAKE_VERSION} + MASON_CMAKE=$(${MASON_DIR}/mason prefix cmake ${CMAKE_VERSION}) + ${MASON_DIR}/mason install ninja ${NINJA_VERSION} + MASON_NINJA=$(${MASON_DIR}/mason prefix ninja ${NINJA_VERSION}) +} + +function mason_compile { + mkdir -p build + cd build + export CXXFLAGS="-fsanitize=address,undefined,integer,leak ${CXXFLAGS}" + export LDFLAGS="-fsanitize=address,undefined,integer,leak ${LDFLAGS}" + ${MASON_CMAKE}/bin/cmake ../ \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} -DCMAKE_BUILD_TYPE=Debug \ + -DWITH_NODEJS=OFF -DWITH_ERROR=OFF \ + -G Ninja -DCMAKE_MAKE_PROGRAM=${MASON_NINJA}/bin/ninja \ + -DCMAKE_CXX_COMPILER_LAUNCHER=${MASON_CCACHE}/bin/ccache \ + -DCMAKE_CXX_COMPILER="${MASON_LLVM}/bin/clang++" \ + -DCMAKE_C_COMPILER="${MASON_LLVM}/bin/clang" \ + -DCMAKE_MODULE_LINKER_FLAGS="${LDFLAGS}" \ + -DCMAKE_SHARED_LINKER_FLAGS="${LDFLAGS}" \ + -DCMAKE_EXE_LINKER_FLAGS="${LDFLAGS}" \ + -DCMAKE_CXX_FLAGS="${CXXFLAGS}" + ${MASON_NINJA}/bin/ninja mbgl-core -j4 + mkdir -p ${MASON_PREFIX}/include + mkdir -p ${MASON_PREFIX}/share + mkdir -p ${MASON_PREFIX}/lib + cp libmbgl-core.a ${MASON_PREFIX}/lib/ + cp -r ../include ${MASON_PREFIX}/ + cp -r ../platform ${MASON_PREFIX}/include/mbgl/ + cp -r ../src ${MASON_PREFIX}/include/mbgl/ + cp -r ../vendor ${MASON_PREFIX}/include/mbgl/ +} + +function 
mason_cflags { + : +} + +function mason_static_libs { + : +} + +function mason_ldflags { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/mbgl-core/20f880e/.travis.yml b/scripts/mbgl-core/20f880e/.travis.yml new file mode 100644 index 000000000..1ea1afc14 --- /dev/null +++ b/scripts/mbgl-core/20f880e/.travis.yml @@ -0,0 +1,26 @@ +language: generic + +addons: + apt: + sources: [ 'ubuntu-toolchain-r-test' ] + packages: [ 'libstdc++-5-dev', + 'libxi-dev', + 'libglu1-mesa-dev', + 'x11proto-randr-dev', + 'x11proto-xext-dev', + 'libxrandr-dev', + 'x11proto-xf86vidmode-dev', + 'libxxf86vm-dev', + 'libxcursor-dev', + 'libxinerama-dev' ] + +matrix: + include: + - os: osx + osx_image: xcode9.2 + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mbgl-core/20f880e/script.sh b/scripts/mbgl-core/20f880e/script.sh new file mode 100755 index 000000000..213f09d04 --- /dev/null +++ b/scripts/mbgl-core/20f880e/script.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash + +MASON_NAME=mbgl-core +MASON_VERSION=20f880e +MASON_LIB_FILE=lib/libmbgl-core.a + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/mapbox-gl-native/tarball/${MASON_VERSION} \ + 6a90311c6f6edf36f4b1d85efabda78ade427765 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapbox-mapbox-gl-native-${MASON_VERSION} +} + +function mason_prepare_compile { + CCACHE_VERSION=3.3.4 + CMAKE_VERSION=3.8.2 + NINJA_VERSION=1.7.2 + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION}) + ${MASON_DIR}/mason install cmake ${CMAKE_VERSION} + MASON_CMAKE=$(${MASON_DIR}/mason prefix cmake ${CMAKE_VERSION}) + ${MASON_DIR}/mason install ninja ${NINJA_VERSION} + MASON_NINJA=$(${MASON_DIR}/mason prefix ninja ${NINJA_VERSION}) +} + +function mason_compile { + mkdir -p build + cd build + ${MASON_CMAKE}/bin/cmake ../ \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} -DCMAKE_BUILD_TYPE=Release \ + -DWITH_NODEJS=OFF -DWITH_ERROR=OFF \ + -G Ninja -DCMAKE_MAKE_PROGRAM=${MASON_NINJA}/bin/ninja \ + -DCMAKE_CXX_COMPILER_LAUNCHER=${MASON_CCACHE}/bin/ccache \ + -DCMAKE_CXX_COMPILER="$CXX" \ + -DCMAKE_C_COMPILER="$CC" + ${MASON_NINJA}/bin/ninja mbgl-core -j4 + mkdir -p ${MASON_PREFIX}/include + mkdir -p ${MASON_PREFIX}/share + mkdir -p ${MASON_PREFIX}/lib + cp libmbgl-core.a ${MASON_PREFIX}/lib/ + cp -r ../include ${MASON_PREFIX}/ + cp -r ../platform ${MASON_PREFIX}/include/mbgl/ + cp -r ../src ${MASON_PREFIX}/include/mbgl/ + cp -r ../vendor ${MASON_PREFIX}/include/mbgl/ +} + +function mason_cflags { + : +} + +function mason_static_libs { + : +} + +function mason_ldflags { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/mbgl-core/a3a661e-asan/.travis.yml b/scripts/mbgl-core/a3a661e-asan/.travis.yml new file mode 100644 index 000000000..ac80284be --- /dev/null +++ b/scripts/mbgl-core/a3a661e-asan/.travis.yml @@ -0,0 +1,27 @@ +language: generic + +addons: + apt: + sources: [ 'ubuntu-toolchain-r-test' ] + packages: [ 
'libstdc++-5-dev', + 'libxi-dev', + 'libglu1-mesa-dev', + 'x11proto-randr-dev', + 'x11proto-xext-dev', + 'libxrandr-dev', + 'x11proto-xf86vidmode-dev', + 'libxxf86vm-dev', + 'libxcursor-dev', + 'libxinerama-dev' ] + +matrix: + include: + - os: osx + osx_image: xcode9.2 + - os: linux + dist: trusty + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mbgl-core/a3a661e-asan/script.sh b/scripts/mbgl-core/a3a661e-asan/script.sh new file mode 100755 index 000000000..651db5bfb --- /dev/null +++ b/scripts/mbgl-core/a3a661e-asan/script.sh @@ -0,0 +1,81 @@ +#!/usr/bin/env bash + +MASON_NAME=mbgl-core +MASON_VERSION=a3a661e-asan +MASON_VERSION2=a3a661e +MASON_LIB_FILE=lib/libmbgl-core.a + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mbgl-${MASON_VERSION2} + if [[ ! -d ${MASON_BUILD_PATH} ]]; then + git clone https://github.com/mapbox/mapbox-gl-native ${MASON_BUILD_PATH} + (cd ${MASON_BUILD_PATH} && git checkout ${MASON_VERSION2} && git submodule update --init) + fi +} + +function mason_prepare_compile { + CCACHE_VERSION=3.7.2 + CMAKE_VERSION=3.15.2 + NINJA_VERSION=1.9.0 + LLVM_VERSION=8.0.0 + ${MASON_DIR}/mason install clang++ ${LLVM_VERSION} + MASON_LLVM=$(${MASON_DIR}/mason prefix clang++ ${LLVM_VERSION}) + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION}) + ${MASON_DIR}/mason install cmake ${CMAKE_VERSION} + MASON_CMAKE=$(${MASON_DIR}/mason prefix cmake ${CMAKE_VERSION}) + ${MASON_DIR}/mason install ninja ${NINJA_VERSION} + MASON_NINJA=$(${MASON_DIR}/mason prefix ninja ${NINJA_VERSION}) +} + +function mason_compile { + mkdir -p build + rm -rf build/* + cd build + # MBGL uses c++14 + export CXXFLAGS="${CXXFLAGS//-std=c++11}" + # MBGL uses 10.11 + export CXXFLAGS="${CXXFLAGS//-mmacosx-version-min=10.8}" + export 
CXXFLAGS="-fsanitize=address,undefined,integer,leak ${CXXFLAGS}" + export LDFLAGS="-fsanitize=address,undefined,integer,leak ${LDFLAGS}" + ${MASON_CMAKE}/bin/cmake ../ \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} -DCMAKE_BUILD_TYPE=Debug \ + -DWITH_NODEJS=OFF -DWITH_ERROR=OFF \ + -G Ninja -DCMAKE_MAKE_PROGRAM=${MASON_NINJA}/bin/ninja \ + -DCMAKE_CXX_COMPILER_LAUNCHER=${MASON_CCACHE}/bin/ccache \ + -DCMAKE_CXX_COMPILER="${MASON_LLVM}/bin/clang++" \ + -DCMAKE_C_COMPILER="${MASON_LLVM}/bin/clang" + ${MASON_NINJA}/bin/ninja mbgl-core -j4 -v + mkdir -p ${MASON_PREFIX}/include + mkdir -p ${MASON_PREFIX}/share + mkdir -p ${MASON_PREFIX}/lib + cp libmbgl-core.a ${MASON_PREFIX}/lib/ + # linux does not vendor icu, but rather pulls from mason + if [ ${MASON_PLATFORM} != 'linux' ]; then + cp libicu.a ${MASON_PREFIX}/lib/ + fi + cp -r ../include ${MASON_PREFIX}/ + cp -r ../platform ${MASON_PREFIX}/include/mbgl/ + cp -r ../src ${MASON_PREFIX}/include/mbgl/ + cp -r ../vendor ${MASON_PREFIX}/include/mbgl/ +} + +function mason_cflags { + : +} + +function mason_static_libs { + : +} + +function mason_ldflags { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/mbgl-core/a3a661e/.travis.yml b/scripts/mbgl-core/a3a661e/.travis.yml new file mode 100644 index 000000000..ac80284be --- /dev/null +++ b/scripts/mbgl-core/a3a661e/.travis.yml @@ -0,0 +1,27 @@ +language: generic + +addons: + apt: + sources: [ 'ubuntu-toolchain-r-test' ] + packages: [ 'libstdc++-5-dev', + 'libxi-dev', + 'libglu1-mesa-dev', + 'x11proto-randr-dev', + 'x11proto-xext-dev', + 'libxrandr-dev', + 'x11proto-xf86vidmode-dev', + 'libxxf86vm-dev', + 'libxcursor-dev', + 'libxinerama-dev' ] + +matrix: + include: + - os: osx + osx_image: xcode9.2 + - os: linux + dist: trusty + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mbgl-core/a3a661e/script.sh b/scripts/mbgl-core/a3a661e/script.sh new file 
mode 100755 index 000000000..e28a30528 --- /dev/null +++ b/scripts/mbgl-core/a3a661e/script.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash + +MASON_NAME=mbgl-core +MASON_VERSION=a3a661e +MASON_LIB_FILE=lib/libmbgl-core.a + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mbgl-${MASON_VERSION} + if [[ ! -d ${MASON_BUILD_PATH} ]]; then + git clone https://github.com/mapbox/mapbox-gl-native ${MASON_BUILD_PATH} + (cd ${MASON_BUILD_PATH} && git checkout ${MASON_VERSION} && git submodule update --init) + fi +} + +function mason_prepare_compile { + CCACHE_VERSION=3.7.2 + CMAKE_VERSION=3.15.2 + NINJA_VERSION=1.9.0 + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION}) + ${MASON_DIR}/mason install cmake ${CMAKE_VERSION} + MASON_CMAKE=$(${MASON_DIR}/mason prefix cmake ${CMAKE_VERSION}) + ${MASON_DIR}/mason install ninja ${NINJA_VERSION} + MASON_NINJA=$(${MASON_DIR}/mason prefix ninja ${NINJA_VERSION}) +} + +function mason_compile { + mkdir -p build + rm -rf build/* + cd build + # MBGL uses c++14 + export CXXFLAGS="${CXXFLAGS//-std=c++11}" + # MBGL uses 10.11 + export CXXFLAGS="${CXXFLAGS//-mmacosx-version-min=10.8}" + + ${MASON_CMAKE}/bin/cmake ../ \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} -DCMAKE_BUILD_TYPE=Release \ + -DWITH_NODEJS=OFF -DWITH_ERROR=OFF \ + -G Ninja -DCMAKE_MAKE_PROGRAM=${MASON_NINJA}/bin/ninja \ + -DCMAKE_CXX_COMPILER_LAUNCHER=${MASON_CCACHE}/bin/ccache \ + -DCMAKE_CXX_COMPILER="$CXX" \ + -DCMAKE_C_COMPILER="$CC" + ${MASON_NINJA}/bin/ninja mbgl-core -j4 -v + mkdir -p ${MASON_PREFIX}/include + mkdir -p ${MASON_PREFIX}/share + mkdir -p ${MASON_PREFIX}/lib + cp libmbgl-core.a ${MASON_PREFIX}/lib/ + # linux does not vendor icu, but rather pulls from mason + if [ ${MASON_PLATFORM} != 'linux' ]; then + cp libicu.a ${MASON_PREFIX}/lib/ + fi + cp -r ../include ${MASON_PREFIX}/ + cp -r ../platform ${MASON_PREFIX}/include/mbgl/ + cp -r ../src 
${MASON_PREFIX}/include/mbgl/ + cp -r ../vendor ${MASON_PREFIX}/include/mbgl/ +} + +function mason_cflags { + : +} + +function mason_static_libs { + : +} + +function mason_ldflags { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/mesa/19.1.6/.travis.yml b/scripts/mesa/19.1.6/.travis.yml new file mode 100644 index 000000000..aae306ef8 --- /dev/null +++ b/scripts/mesa/19.1.6/.travis.yml @@ -0,0 +1,42 @@ +language: generic + +matrix: + include: + - os: linux + dist: bionic + addons: + apt: + packages: + - libstdc++-5-dev + - llvm-8-dev + - llvm-8-tools + - python3-pip + - python3-setuptools + - pkg-config + - libdrm-dev + - elfutils + - libelf-dev + - bison + - flex + - libwayland-dev + - wayland-protocols + - libwayland-egl-backend-dev + - libx11-dev + - libxext-dev + - libxdamage-dev + - libxcb-glx0-dev + - libx11-xcb-dev + - libxcb-dri2-0-dev + - libxcb-dri3-dev + - libxcb-present-dev + - libxshmfence-dev + - libxxf86vm-dev + - libxrandr-dev + - gettext + - ninja-build + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/mesa/19.1.6/script.sh b/scripts/mesa/19.1.6/script.sh new file mode 100644 index 000000000..c71f65390 --- /dev/null +++ b/scripts/mesa/19.1.6/script.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +MASON_NAME=mesa +MASON_VERSION=19.1.6 +MASON_LIB_FILE=lib/x86_64-linux-gnu/libOSMesa.so.8.0.0 + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://mesa.freedesktop.org/archive/mesa-${MASON_VERSION}.tar.xz \ + 9849dc6e3f2f6daa30a69dddefb2a1e25f1dfec7 + + mason_extract_tar_xz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mesa-${MASON_VERSION} +} + +function mason_prepare_compile { + python3 -m pip install meson mako +} + +function mason_compile { + meson builddir/ \ + -D shader-cache=true \ + -D buildtype=release \ + -D gles2=true \ + -D shared-llvm=false \ + -D osmesa=gallium \ + -D dri-drivers=[] \ + -D vulkan-drivers=[] \ + -D glx=gallium-xlib \ + -D gallium-drivers=swrast,swr \ + -D prefix=${MASON_PREFIX} + ninja -C builddir/ + ninja -C builddir/ install +} + +function mason_cflags { + echo -I${MASON_PREFIX}/include +} + +function mason_ldflags { + # We include just the library path. Users are expected to provide additional flags + # depending on which of the packaged libraries they actually want to link: + # + # * For GLX: -lGL -lX11 + # * For EGL: -lGLESv2 -lEGL -lgbm + # * For OSMesa: -lOSMesa + # + echo -L${MASON_PREFIX}/lib +} + +mason_run "$@" diff --git a/scripts/nasm/2.11.06/script.sh b/scripts/nasm/2.11.06/script.sh index f14091cee..8bf8f20db 100755 --- a/scripts/nasm/2.11.06/script.sh +++ b/scripts/nasm/2.11.06/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/nasm function mason_load_source { mason_download \ - http://www.nasm.us/pub/nasm/releasebuilds/${MASON_VERSION}/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ + https://www.nasm.us/pub/nasm/releasebuilds/${MASON_VERSION}/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ 9602eca86270d4df37f53ae4de2342073ad4adc7 mason_extract_tar_bz2 diff --git a/scripts/ninja/1.10.1/.travis.yml b/scripts/ninja/1.10.1/.travis.yml new file mode 100644 index 000000000..8c71516f9 --- /dev/null +++ b/scripts/ninja/1.10.1/.travis.yml @@ -0,0 +1,13 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + compiler: clang + - os: linux + sudo: false + +script: +- ./mason build 
${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/ninja/1.10.1/script.sh b/scripts/ninja/1.10.1/script.sh new file mode 100755 index 000000000..3794b2aa1 --- /dev/null +++ b/scripts/ninja/1.10.1/script.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +MASON_NAME=ninja +MASON_VERSION=1.10.1 +MASON_LIB_FILE=bin/ninja + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/ninja-build/ninja/archive/v${MASON_VERSION}.tar.gz \ + 39f0b8b8335a6ddc3678b9c58c4042ae3fa1424f + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + ./configure.py --bootstrap + mkdir -p ${MASON_PREFIX}/bin/ + cp ./ninja ${MASON_PREFIX}/bin/ +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/ninja/1.9.0/.travis.yml b/scripts/ninja/1.9.0/.travis.yml new file mode 100644 index 000000000..8c71516f9 --- /dev/null +++ b/scripts/ninja/1.9.0/.travis.yml @@ -0,0 +1,13 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + compiler: clang + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/ninja/1.9.0/script.sh b/scripts/ninja/1.9.0/script.sh new file mode 100755 index 000000000..6064a84dc --- /dev/null +++ b/scripts/ninja/1.9.0/script.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +MASON_NAME=ninja +MASON_VERSION=1.9.0 +MASON_LIB_FILE=bin/ninja + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/ninja-build/ninja/archive/v${MASON_VERSION}.tar.gz \ + 794274ddac80ccfff85c3aa0c6ff5004a98a0142 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + ./configure.py --bootstrap + mkdir -p ${MASON_PREFIX}/bin/ + cp ./ninja ${MASON_PREFIX}/bin/ +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/node/6.14.3-g/.travis.yml b/scripts/node/6.14.3-g/.travis.yml new file mode 100644 index 000000000..8d782808b --- /dev/null +++ b/scripts/node/6.14.3-g/.travis.yml @@ -0,0 +1,18 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/node/6.14.3-g/patch.diff b/scripts/node/6.14.3-g/patch.diff new file mode 100644 index 000000000..7a28de369 --- /dev/null +++ b/scripts/node/6.14.3-g/patch.diff @@ -0,0 +1,91 @@ +diff --git a/Makefile b/Makefile +index 523a3d8..d1e7fce 100644 +--- a/Makefile ++++ b/Makefile +@@ -57,7 +57,7 @@ V ?= 1 + ifeq ($(BUILDTYPE),Release) + all: out/Makefile $(NODE_EXE) + else +-all: out/Makefile $(NODE_EXE) $(NODE_G_EXE) ++all: out/Makefile $(NODE_G_EXE) + endif + + # The .PHONY is needed to ensure that we recursively use the out/Makefile +@@ -733,7 +733,7 @@ endif + + $(BINARYTAR): release-only + $(RM) -r $(BINARYNAME) +- $(RM) -r out/deps out/Release ++ $(RM) -r out/deps out/Debug + $(PYTHON) ./configure \ + --prefix=/ \ + --dest-cpu=$(DESTCPU) \ +@@ -902,4 +902,3 @@ endif + bench-ci lint-js-ci doc-only $(TARBALL)-headers test-ci test-ci-native \ + test-ci-js build-ci test-hash-seed clear-stalled test-addons-napi \ + 
build-addons-napi +- +diff --git a/tools/install.py b/tools/install.py +index bb7d528..6dea6d6 100755 +--- a/tools/install.py ++++ b/tools/install.py +@@ -110,7 +110,7 @@ def subdir_files(path, dest, action): + def files(action): + is_windows = sys.platform == 'win32' + output_file = 'node' +- output_prefix = 'out/Release/' ++ output_prefix = 'out/Debug/' + + if 'false' == variables.get('node_shared'): + if is_windows: +@@ -131,7 +131,7 @@ def files(action): + action([output_prefix + output_file], 'lib/' + output_file) + + if 'true' == variables.get('node_use_dtrace'): +- action(['out/Release/node.d'], 'lib/dtrace/node.d') ++ action(['out/Debug/node.d'], 'lib/dtrace/node.d') + + # behave similarly for systemtap + action(['src/node.stp'], 'share/systemtap/tapset/') +@@ -163,7 +163,7 @@ def headers(action): + + # Add the expfile that is created on AIX + if sys.platform.startswith('aix'): +- action(['out/Release/node.exp'], 'include/node/') ++ action(['out/Debug/node.exp'], 'include/node/') + + subdir_files('deps/v8/include', 'include/node/', action) + +diff --git a/src/util.h b/src/util.h +index ecd5b12..aa7c41e 100644 +--- a/src/util.h ++++ b/src/util.h +@@ -11,16 +11,8 @@ + #include + #include + +-// OSX 10.9 defaults to libc++ which provides a C++11 header. 
+-#if defined(__APPLE__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1090 +-#define USE_TR1_TYPE_TRAITS +-#endif + +-#ifdef USE_TR1_TYPE_TRAITS +-#include // NOLINT(build/c++tr1) +-#else + #include // std::remove_reference +-#endif + + namespace node { + +@@ -47,11 +39,7 @@ NO_RETURN void Abort(); + NO_RETURN void Assert(const char* const (*args)[4]); + void DumpBacktrace(FILE* fp); + +-#ifdef USE_TR1_TYPE_TRAITS +-template using remove_reference = std::tr1::remove_reference; +-#else + template using remove_reference = std::remove_reference; +-#endif + + #define FIXED_ONE_BYTE_STRING(isolate, string) \ + (node::OneByteString((isolate), (string), sizeof(string) - 1)) diff --git a/scripts/node/6.14.3-g/script.sh b/scripts/node/6.14.3-g/script.sh new file mode 100644 index 000000000..0e36e53de --- /dev/null +++ b/scripts/node/6.14.3-g/script.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +MASON_NAME=node +MASON_VERSION=6.14.3-g +MASON_VERSION2=6.14.3 +MASON_LIB_FILE=bin/node + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/nodejs/node/archive/v${MASON_VERSION2}.tar.gz \ + ee37fb7e5594b3240df99e6fdea2cc55e887e77d + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/node-${MASON_VERSION2} +} + +function mason_prepare_compile { + CCACHE_VERSION=3.3.4 + + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION}) + export CXX="${MASON_CCACHE}/bin/ccache ${CXX:-clang++}" + export CC="${MASON_CCACHE}/bin/ccache ${CC:-clang}" + export LINK=${CXX:-clang++} +} + +function mason_compile { + # init a git repo to avoid the nodejs Makefile + # complaining about changes that it detects in the parent directory + git init . 
+ + mason_step "Loading patch" + patch -N -p1 < ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff + + # disable icu + export BUILD_INTL_FLAGS="--with-intl=none" + export BUILD_DOWNLOAD_FLAGS=" " + export DISABLE_V8_I18N=1 + export TAG= + export BUILDTYPE=Debug + export DISTTYPE=release + export CONFIG_FLAGS="--debug --shared-zlib" + + export CXXFLAGS="${CXXFLAGS} -std=c++11" + export LDFLAGS="${LDFLAGS} -std=c++11" + + if [[ $(uname -s) == 'Darwin' ]]; then + export CXXFLAGS="${CXXFLAGS} -stdlib=libc++" + export LDFLAGS="${LDFLAGS} -stdlib=libc++" + fi + + echo "making binary" + # we use `make binary` to hook into PORTABLE=1 + # note, pass V=1 to see compile args (default off to avoid breaking the 4 GB log limit on travis) + V= PREFIX=${MASON_PREFIX} make binary -j${MASON_CONCURRENCY} + ls + echo "uncompressing binary" + tar -xf *.tar.gz + echo "making dir" + mkdir -p ${MASON_PREFIX} + echo "copying over" + cp -r node-v${MASON_VERSION2}*/* ${MASON_PREFIX}/ + # the 'make binary' target does not package the node debug binary `node_g` so we manually copy over now + cp out/Debug/node ${MASON_PREFIX}/bin/node +} + +function mason_cflags { + : +} + +function mason_static_libs { + : +} + + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/node/6.14.3/.travis.yml b/scripts/node/6.14.3/.travis.yml new file mode 100644 index 000000000..8d782808b --- /dev/null +++ b/scripts/node/6.14.3/.travis.yml @@ -0,0 +1,18 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/node/6.14.3/patch.diff b/scripts/node/6.14.3/patch.diff new file mode 100644 index 000000000..836d91db1 --- /dev/null +++ b/scripts/node/6.14.3/patch.diff @@ -0,0 +1,33 @@ +diff --git a/src/util.h b/src/util.h 
+index ecd5b12..aa7c41e 100644 +--- a/src/util.h ++++ b/src/util.h +@@ -11,16 +11,8 @@ + #include + #include + +-// OSX 10.9 defaults to libc++ which provides a C++11 header. +-#if defined(__APPLE__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1090 +-#define USE_TR1_TYPE_TRAITS +-#endif + +-#ifdef USE_TR1_TYPE_TRAITS +-#include // NOLINT(build/c++tr1) +-#else + #include // std::remove_reference +-#endif + + namespace node { + +@@ -47,11 +39,7 @@ NO_RETURN void Abort(); + NO_RETURN void Assert(const char* const (*args)[4]); + void DumpBacktrace(FILE* fp); + +-#ifdef USE_TR1_TYPE_TRAITS +-template using remove_reference = std::tr1::remove_reference; +-#else + template using remove_reference = std::remove_reference; +-#endif + + #define FIXED_ONE_BYTE_STRING(isolate, string) \ + (node::OneByteString((isolate), (string), sizeof(string) - 1)) diff --git a/scripts/node/6.14.3/script.sh b/scripts/node/6.14.3/script.sh new file mode 100644 index 000000000..2eb0f0f58 --- /dev/null +++ b/scripts/node/6.14.3/script.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash + +MASON_NAME=node +MASON_VERSION=6.14.3 +MASON_LIB_FILE=bin/node + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/nodejs/node/archive/v${MASON_VERSION}.tar.gz \ + ee37fb7e5594b3240df99e6fdea2cc55e887e77d + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/node-${MASON_VERSION} +} + +function mason_prepare_compile { + CCACHE_VERSION=3.3.4 + + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION}) + export CXX="${MASON_CCACHE}/bin/ccache ${CXX:-clang++}" + export CC="${MASON_CCACHE}/bin/ccache ${CC:-clang}" + export LINK=${CXX:-clang++} +} + +function mason_compile { + # init a git repo to avoid the nodejs Makefile + # complaining about changes that it detects in the parent directory + git init . 
+ + mason_step "Loading patch" + patch -N -p1 < ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff + + # disable icu + export BUILD_INTL_FLAGS="--with-intl=none" + export BUILD_DOWNLOAD_FLAGS=" " + export DISABLE_V8_I18N=1 + export TAG= + export BUILDTYPE=Release + export DISTTYPE=release + export CONFIG_FLAGS="--shared-zlib" + + export CXXFLAGS="${CXXFLAGS} -std=c++11" + export LDFLAGS="${LDFLAGS} -std=c++11" + + if [[ $(uname -s) == 'Darwin' ]]; then + export CXXFLAGS="${CXXFLAGS} -stdlib=libc++" + export LDFLAGS="${LDFLAGS} -stdlib=libc++" + fi + + echo "making binary" + # we use `make binary` to hook into PORTABLE=1 + # note, pass V=1 to see compile args (default off to avoid breaking the 4 GB log limit on travis) + V= PREFIX=${MASON_PREFIX} make binary -j${MASON_CONCURRENCY} + ls + echo "uncompressing binary" + tar -xf *.tar.gz + echo "making dir" + mkdir -p ${MASON_PREFIX} + echo "copying over" + cp -r node-v${MASON_VERSION}*/* ${MASON_PREFIX}/ +} + +function mason_cflags { + : +} + +function mason_static_libs { + : +} + + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/node/8.11.3/.travis.yml b/scripts/node/8.11.3/.travis.yml new file mode 100644 index 000000000..8d782808b --- /dev/null +++ b/scripts/node/8.11.3/.travis.yml @@ -0,0 +1,18 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/node/8.11.3/script.sh b/scripts/node/8.11.3/script.sh new file mode 100644 index 000000000..863f20807 --- /dev/null +++ b/scripts/node/8.11.3/script.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash + +MASON_NAME=node +MASON_VERSION=8.11.3 +MASON_LIB_FILE=bin/node + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/nodejs/node/archive/v${MASON_VERSION}.tar.gz \ + fa5631c244128c5ef6c51708be0f4b6918d123eb + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/node-${MASON_VERSION} +} + +function mason_prepare_compile { + CCACHE_VERSION=3.3.4 + + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION}) + export CXX="${MASON_CCACHE}/bin/ccache ${CXX:-clang++}" + export CC="${MASON_CCACHE}/bin/ccache ${CC:-clang}" + export LINK=${CXX:-clang++} +} + +function mason_compile { + # init a git repo to avoid the nodejs Makefile + # complaining about changes that it detects in the parent directory + git init . + + # disable icu + export BUILD_INTL_FLAGS="--with-intl=none" + export BUILD_DOWNLOAD_FLAGS=" " + export DISABLE_V8_I18N=1 + export TAG= + export BUILDTYPE=Release + export DISTTYPE=release + export CONFIG_FLAGS="--shared-zlib" + + export CXXFLAGS="${CXXFLAGS} -std=c++11" + export LDFLAGS="${LDFLAGS} -std=c++11" + + if [[ $(uname -s) == 'Darwin' ]]; then + export CXXFLAGS="${CXXFLAGS} -stdlib=libc++" + export LDFLAGS="${LDFLAGS} -stdlib=libc++" + fi + + echo "making binary" + # we use `make binary` to hook into PORTABLE=1 + # note, pass V=1 to see compile args (default off to avoid breaking the 4 GB log limit on travis) + V= PREFIX=${MASON_PREFIX} make binary -j${MASON_CONCURRENCY} + ls + echo "uncompressing binary" + tar -xf *.tar.gz + echo "making dir" + mkdir -p ${MASON_PREFIX} + echo "copying over" + cp -r node-v${MASON_VERSION}*/* ${MASON_PREFIX}/ +} + +function mason_cflags { + : +} + +function mason_static_libs { + : +} + + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/nsis/3.01/script.sh b/scripts/nsis/3.01/script.sh index 10b97a437..bed0d1511 100755 --- a/scripts/nsis/3.01/script.sh +++ b/scripts/nsis/3.01/script.sh @@ -18,7 +18,7 @@ function mason_load_source { function 
mason_compile { if [ ! -f scons-local-2.5.1.tar.gz ]; then - wget http://prdownloads.sourceforge.net/scons/scons-local-2.5.1.tar.gz + wget https://prdownloads.sourceforge.net/scons/scons-local-2.5.1.tar.gz tar xvf scons-local-2.5.1.tar.gz fi perl -i -p -e "s/'__attribute__\(\(__stdcall__\)\)'/'\"__attribute__\(\(__stdcall__\)\)\"'/g" SCons/Config/gnu diff --git a/scripts/nunicode/1.8/.travis.yml b/scripts/nunicode/1.8/.travis.yml new file mode 100644 index 000000000..1b6ba4fd4 --- /dev/null +++ b/scripts/nunicode/1.8/.travis.yml @@ -0,0 +1,40 @@ +language: cpp + +sudo: false + +matrix: + include: + - os: osx + env: MASON_PLATFORM=osx + compiler: clang + - os: linux + env: MASON_PLATFORM=linux + compiler: clang + - os: linux + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v5 + - os: linux + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v7 + - os: linux + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v8 + - os: linux + env: MASON_PLATFORM=android MASON_ANDROID_ABI=x86 + - os: linux + env: MASON_PLATFORM=android MASON_ANDROID_ABI=x86-64 + - os: linux + env: MASON_PLATFORM=android MASON_ANDROID_ABI=mips + - os: linux + env: MASON_PLATFORM=android MASON_ANDROID_ABI=mips-64 + +addons: + apt: + sources: [ 'ubuntu-toolchain-r-test', 'george-edison55-precise-backports' ] + packages: [ 'libstdc++-5-dev', 'cmake', 'cmake-data' ] + +install: +- if [[ $(uname -s) == 'Darwin' && $(which brew) == '' ]]; then brew install cmake; fi + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/nunicode/1.8/script.sh b/scripts/nunicode/1.8/script.sh new file mode 100644 index 000000000..02c0f6b09 --- /dev/null +++ b/scripts/nunicode/1.8/script.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash + +MASON_NAME=nunicode +MASON_VERSION=1.8 +MASON_LIB_FILE=lib/libnu.a +MASON_PKGCONFIG_FILE=lib/pkgconfig/nu.pc + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download https://bitbucket.org/alekseyt/nunicode/get/1.8.tar.bz2 \ + d4788a570b8eafec01eaa7a3425a3529d9f70842 + + mason_extract_tar_bz2 + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/alekseyt-nunicode-246bb27014ab +} + +function mason_compile { + mkdir -p build-dir + cd build-dir + + # patch CMakeLists file + cat ../CMakeLists.txt | sed -e '/find_package.Sqlite3/ s/^/#/' > ../CMakeLists.txt.new && cp ../CMakeLists.txt.new ../CMakeLists.txt + + if [ ${MASON_PLATFORM} = 'android' ]; then + ${MASON_DIR}/utils/android.sh > toolchain.cmake + + cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} \ + -DCMAKE_TOOLCHAIN_FILE=toolchain.cmake \ + .. + else + cmake \ + -DCMAKE_BUILD_TYPE=RELEASE \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} \ + .. + fi + + make install -j${MASON_CONCURRENCY} +} + +function mason_cflags { + echo -I${MASON_PREFIX}/include +} + +function mason_ldflags { + : # We're only using the full path to the archive, which is output in static_libs +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/openfst/1.6.3/script.sh b/scripts/openfst/1.6.3/script.sh index 72b0d4ce0..3b89f9818 100755 --- a/scripts/openfst/1.6.3/script.sh +++ b/scripts/openfst/1.6.3/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libfst.a function mason_load_source { mason_download \ - http://www.openfst.org/twiki/pub/FST/FstDownload/${MASON_NAME}-${MASON_VERSION}.tar.gz \ + https://www.openfst.org/twiki/pub/FST/FstDownload/${MASON_NAME}-${MASON_VERSION}.tar.gz \ 9e144c56ea477038d14583376b6414170f0e1b1d mason_extract_tar_gz diff --git a/scripts/osmium-tool/1.10.0/.travis.yml b/scripts/osmium-tool/1.10.0/.travis.yml new file mode 100644 index 000000000..65aaef819 --- /dev/null +++ b/scripts/osmium-tool/1.10.0/.travis.yml @@ -0,0 +1,22 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + 
sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + - pandoc + +script: +- if [[ $(uname -s) == 'Darwin' ]]; then brew install pandoc || true; fi; +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- nm $(./mason prefix ${MASON_NAME} ${MASON_VERSION})/bin/osmium | grep "GLIBC" | c++filt || true +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/osmium-tool/1.10.0/script.sh b/scripts/osmium-tool/1.10.0/script.sh new file mode 100755 index 000000000..d92550135 --- /dev/null +++ b/scripts/osmium-tool/1.10.0/script.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash + +MASON_NAME=osmium-tool +MASON_VERSION=1.10.0 +MASON_LIB_FILE=bin/osmium + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/osmcode/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + c7ca8fe2bc4422e81c658e020ab5fe6e31de1977 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_prepare_compile { + CCACHE_VERSION=3.3.4 + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION}) + ${MASON_DIR}/mason install cmake 3.8.2 + ${MASON_DIR}/mason link cmake 3.8.2 + ${MASON_DIR}/mason install protozero 1.6.4 + ${MASON_DIR}/mason link protozero 1.6.4 + ${MASON_DIR}/mason install rapidjson 2016-07-20-369de87 + ${MASON_DIR}/mason link rapidjson 2016-07-20-369de87 + ${MASON_DIR}/mason install libosmium 2.15.0 + ${MASON_DIR}/mason link libosmium 2.15.0 + BOOST_VERSION=1.66.0 + ${MASON_DIR}/mason install boost ${BOOST_VERSION} + ${MASON_DIR}/mason link boost ${BOOST_VERSION} + ${MASON_DIR}/mason install boost_libprogram_options ${BOOST_VERSION} + ${MASON_DIR}/mason link boost_libprogram_options ${BOOST_VERSION} + ${MASON_DIR}/mason install zlib 1.2.8 + ${MASON_DIR}/mason link zlib 1.2.8 + ${MASON_DIR}/mason install expat 2.2.0 + ${MASON_DIR}/mason link expat 2.2.0 + ${MASON_DIR}/mason install bzip2 1.0.6 + 
${MASON_DIR}/mason link bzip2 1.0.6 +} + +function mason_compile { + rm -rf build + mkdir -p build + cd build + CMAKE_PREFIX_PATH=${MASON_ROOT}/.link \ + ${MASON_ROOT}/.link/bin/cmake \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} \ + -DCMAKE_CXX_COMPILER_LAUNCHER="${MASON_CCACHE}/bin/ccache" \ + -DCMAKE_BUILD_TYPE=Release \ + -DBoost_NO_SYSTEM_PATHS=ON \ + -DBoost_USE_STATIC_LIBS=ON \ + .. + # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + make VERBOSE=1 -j4 + else + make VERBOSE=1 -j${MASON_CONCURRENCY} + fi + make install + +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/osmium-tool/1.11.0/.travis.yml b/scripts/osmium-tool/1.11.0/.travis.yml new file mode 100644 index 000000000..65aaef819 --- /dev/null +++ b/scripts/osmium-tool/1.11.0/.travis.yml @@ -0,0 +1,22 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + - pandoc + +script: +- if [[ $(uname -s) == 'Darwin' ]]; then brew install pandoc || true; fi; +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- nm $(./mason prefix ${MASON_NAME} ${MASON_VERSION})/bin/osmium | grep "GLIBC" | c++filt || true +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/osmium-tool/1.11.0/script.sh b/scripts/osmium-tool/1.11.0/script.sh new file mode 100755 index 000000000..d5fc2b1ad --- /dev/null +++ b/scripts/osmium-tool/1.11.0/script.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash + +MASON_NAME=osmium-tool +MASON_VERSION=1.11.0 +MASON_LIB_FILE=bin/osmium + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/osmcode/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + 7d45762397e695fc0156007034b9cc98adf91380 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_prepare_compile { + CCACHE_VERSION=3.3.4 + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION}) + ${MASON_DIR}/mason install cmake 3.15.2 + ${MASON_DIR}/mason link cmake 3.15.2 + ${MASON_DIR}/mason install protozero 1.6.8 + ${MASON_DIR}/mason link protozero 1.6.8 + ${MASON_DIR}/mason install rapidjson 2016-07-20-369de87 + ${MASON_DIR}/mason link rapidjson 2016-07-20-369de87 + ${MASON_DIR}/mason install libosmium 2.15.3 + ${MASON_DIR}/mason link libosmium 2.15.3 + BOOST_VERSION=1.66.0 + ${MASON_DIR}/mason install boost ${BOOST_VERSION} + ${MASON_DIR}/mason link boost ${BOOST_VERSION} + ${MASON_DIR}/mason install boost_libprogram_options ${BOOST_VERSION} + ${MASON_DIR}/mason link boost_libprogram_options ${BOOST_VERSION} + ${MASON_DIR}/mason install zlib 1.2.8 + ${MASON_DIR}/mason link zlib 1.2.8 + ${MASON_DIR}/mason install expat 2.2.0 + ${MASON_DIR}/mason link expat 2.2.0 + ${MASON_DIR}/mason install bzip2 1.0.6 + ${MASON_DIR}/mason link bzip2 1.0.6 +} + +function mason_compile { + rm -rf build + mkdir -p build + cd build + CMAKE_PREFIX_PATH=${MASON_ROOT}/.link \ + ${MASON_ROOT}/.link/bin/cmake \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} \ + -DCMAKE_CXX_COMPILER_LAUNCHER="${MASON_CCACHE}/bin/ccache" \ + -DCMAKE_BUILD_TYPE=Release \ + -DBoost_NO_SYSTEM_PATHS=ON \ + -DBoost_USE_STATIC_LIBS=ON \ + .. 
+ # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + make VERBOSE=1 -j4 + else + make VERBOSE=1 -j${MASON_CONCURRENCY} + fi + make install + +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/osmium-tool/1.12.0/.travis.yml b/scripts/osmium-tool/1.12.0/.travis.yml new file mode 100644 index 000000000..65aaef819 --- /dev/null +++ b/scripts/osmium-tool/1.12.0/.travis.yml @@ -0,0 +1,22 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + - pandoc + +script: +- if [[ $(uname -s) == 'Darwin' ]]; then brew install pandoc || true; fi; +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- nm $(./mason prefix ${MASON_NAME} ${MASON_VERSION})/bin/osmium | grep "GLIBC" | c++filt || true +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/osmium-tool/1.12.0/script.sh b/scripts/osmium-tool/1.12.0/script.sh new file mode 100755 index 000000000..afb2f42b5 --- /dev/null +++ b/scripts/osmium-tool/1.12.0/script.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash + +MASON_NAME=osmium-tool +MASON_VERSION=1.12.0 +MASON_LIB_FILE=bin/osmium + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/osmcode/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + a87aee6d85b6e45803065c858e6c77746bfaa0ca + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_prepare_compile { + CCACHE_VERSION=3.3.4 + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION}) + ${MASON_DIR}/mason install cmake 3.15.2 + ${MASON_DIR}/mason link cmake 3.15.2 + ${MASON_DIR}/mason install protozero 1.7.0 + ${MASON_DIR}/mason link protozero 1.7.0 + ${MASON_DIR}/mason install rapidjson 2016-07-20-369de87 + ${MASON_DIR}/mason link rapidjson 2016-07-20-369de87 + ${MASON_DIR}/mason install libosmium 2.15.5 + ${MASON_DIR}/mason link libosmium 2.15.5 + BOOST_VERSION=1.73.0 + ${MASON_DIR}/mason install boost ${BOOST_VERSION} + ${MASON_DIR}/mason link boost ${BOOST_VERSION} + ${MASON_DIR}/mason install boost_libprogram_options ${BOOST_VERSION} + ${MASON_DIR}/mason link boost_libprogram_options ${BOOST_VERSION} + ${MASON_DIR}/mason install zlib 1.2.8 + ${MASON_DIR}/mason link zlib 1.2.8 + ${MASON_DIR}/mason install expat 2.2.4 + ${MASON_DIR}/mason link expat 2.2.4 + ${MASON_DIR}/mason install bzip2 1.0.6 + ${MASON_DIR}/mason link bzip2 1.0.6 +} + +function mason_compile { + rm -rf build + mkdir -p build + cd build + CMAKE_PREFIX_PATH=${MASON_ROOT}/.link \ + ${MASON_ROOT}/.link/bin/cmake \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} \ + -DCMAKE_CXX_COMPILER_LAUNCHER="${MASON_CCACHE}/bin/ccache" \ + -DCMAKE_BUILD_TYPE=Release \ + -DBoost_NO_SYSTEM_PATHS=ON \ + -DBoost_USE_STATIC_LIBS=ON \ + .. 
+ # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + make VERBOSE=1 -j4 + else + make VERBOSE=1 -j${MASON_CONCURRENCY} + fi + make install + +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/osmium-tool/1.12.1/.travis.yml b/scripts/osmium-tool/1.12.1/.travis.yml new file mode 100644 index 000000000..931a05d57 --- /dev/null +++ b/scripts/osmium-tool/1.12.1/.travis.yml @@ -0,0 +1,22 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + - pandoc + +script: +- if [[ $(uname -s) == 'Darwin' ]]; then brew install pandoc || true; fi; +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- nm $(./mason prefix ${MASON_NAME} ${MASON_VERSION})/bin/osmium | grep "GLIBC" | c++filt || true +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/osmium-tool/1.12.1/script.sh b/scripts/osmium-tool/1.12.1/script.sh new file mode 100644 index 000000000..97985bbec --- /dev/null +++ b/scripts/osmium-tool/1.12.1/script.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash + +MASON_NAME=osmium-tool +MASON_VERSION=1.12.1 +MASON_LIB_FILE=bin/osmium + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/osmcode/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + 9e2c4f564ba677bfc386ecf65154bf4531afe1c0 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_prepare_compile { + CCACHE_VERSION=3.3.4 + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION}) + ${MASON_DIR}/mason install cmake 3.15.2 + ${MASON_DIR}/mason link cmake 3.15.2 + ${MASON_DIR}/mason install protozero 1.7.0 + ${MASON_DIR}/mason link protozero 1.7.0 + ${MASON_DIR}/mason install rapidjson 2016-07-20-369de87 + ${MASON_DIR}/mason link rapidjson 2016-07-20-369de87 + ${MASON_DIR}/mason install libosmium 2.15.6 + ${MASON_DIR}/mason link libosmium 2.15.6 + BOOST_VERSION=1.73.0 + ${MASON_DIR}/mason install boost ${BOOST_VERSION} + ${MASON_DIR}/mason link boost ${BOOST_VERSION} + ${MASON_DIR}/mason install boost_libprogram_options ${BOOST_VERSION} + ${MASON_DIR}/mason link boost_libprogram_options ${BOOST_VERSION} + ${MASON_DIR}/mason install zlib 1.2.8 + ${MASON_DIR}/mason link zlib 1.2.8 + ${MASON_DIR}/mason install expat 2.2.4 + ${MASON_DIR}/mason link expat 2.2.4 + ${MASON_DIR}/mason install bzip2 1.0.6 + ${MASON_DIR}/mason link bzip2 1.0.6 +} + +function mason_compile { + rm -rf build + mkdir -p build + cd build + CMAKE_PREFIX_PATH=${MASON_ROOT}/.link \ + ${MASON_ROOT}/.link/bin/cmake \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} \ + -DCMAKE_CXX_COMPILER_LAUNCHER="${MASON_CCACHE}/bin/ccache" \ + -DCMAKE_BUILD_TYPE=Release \ + -DBoost_NO_SYSTEM_PATHS=ON \ + -DBoost_USE_STATIC_LIBS=ON \ + .. 
+ # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + make VERBOSE=1 -j4 + else + make VERBOSE=1 -j${MASON_CONCURRENCY} + fi + make install + +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" \ No newline at end of file diff --git a/scripts/osmium-tool/1.9.0/.travis.yml b/scripts/osmium-tool/1.9.0/.travis.yml new file mode 100644 index 000000000..65aaef819 --- /dev/null +++ b/scripts/osmium-tool/1.9.0/.travis.yml @@ -0,0 +1,22 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + - pandoc + +script: +- if [[ $(uname -s) == 'Darwin' ]]; then brew install pandoc || true; fi; +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- nm $(./mason prefix ${MASON_NAME} ${MASON_VERSION})/bin/osmium | grep "GLIBC" | c++filt || true +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/osmium-tool/1.9.0/script.sh b/scripts/osmium-tool/1.9.0/script.sh new file mode 100755 index 000000000..ff1fbefdf --- /dev/null +++ b/scripts/osmium-tool/1.9.0/script.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash + +MASON_NAME=osmium-tool +MASON_VERSION=1.9.0 +MASON_LIB_FILE=bin/osmium + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/osmcode/${MASON_NAME}/archive/v1.9.0.tar.gz \ + 4218c5dc9fe3ebbd225ead1be419b51dc82578df + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-1.9.0 +} + +function mason_prepare_compile { + CCACHE_VERSION=3.3.4 + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION}) + ${MASON_DIR}/mason install cmake 3.8.2 + ${MASON_DIR}/mason link cmake 3.8.2 + ${MASON_DIR}/mason install protozero 1.6.3 + ${MASON_DIR}/mason link protozero 1.6.3 + ${MASON_DIR}/mason install rapidjson 2016-07-20-369de87 + ${MASON_DIR}/mason link rapidjson 2016-07-20-369de87 + ${MASON_DIR}/mason install libosmium 2.14.2 + ${MASON_DIR}/mason link libosmium 2.14.2 + BOOST_VERSION=1.66.0 + ${MASON_DIR}/mason install boost ${BOOST_VERSION} + ${MASON_DIR}/mason link boost ${BOOST_VERSION} + ${MASON_DIR}/mason install boost_libprogram_options ${BOOST_VERSION} + ${MASON_DIR}/mason link boost_libprogram_options ${BOOST_VERSION} + ${MASON_DIR}/mason install zlib 1.2.8 + ${MASON_DIR}/mason link zlib 1.2.8 + ${MASON_DIR}/mason install expat 2.2.0 + ${MASON_DIR}/mason link expat 2.2.0 + ${MASON_DIR}/mason install bzip2 1.0.6 + ${MASON_DIR}/mason link bzip2 1.0.6 +} + +function mason_compile { + rm -rf build + mkdir -p build + cd build + CMAKE_PREFIX_PATH=${MASON_ROOT}/.link \ + ${MASON_ROOT}/.link/bin/cmake \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} \ + -DCMAKE_CXX_COMPILER_LAUNCHER="${MASON_CCACHE}/bin/ccache" \ + -DCMAKE_BUILD_TYPE=Release \ + -DBoost_NO_SYSTEM_PATHS=ON \ + -DBoost_USE_STATIC_LIBS=ON \ + .. 
+ # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + make VERBOSE=1 -j4 + else + make VERBOSE=1 -j${MASON_CONCURRENCY} + fi + make install + +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/osmium-tool/1.9.1/.travis.yml b/scripts/osmium-tool/1.9.1/.travis.yml new file mode 100644 index 000000000..65aaef819 --- /dev/null +++ b/scripts/osmium-tool/1.9.1/.travis.yml @@ -0,0 +1,22 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.8-dev + - pandoc + +script: +- if [[ $(uname -s) == 'Darwin' ]]; then brew install pandoc || true; fi; +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- nm $(./mason prefix ${MASON_NAME} ${MASON_VERSION})/bin/osmium | grep "GLIBC" | c++filt || true +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/osmium-tool/1.9.1/script.sh b/scripts/osmium-tool/1.9.1/script.sh new file mode 100755 index 000000000..4c7cc34c6 --- /dev/null +++ b/scripts/osmium-tool/1.9.1/script.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash + +MASON_NAME=osmium-tool +MASON_VERSION=1.9.1 +MASON_LIB_FILE=bin/osmium + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/osmcode/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + 746bc34975ef726c5d403530855948eca5bf6047 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_prepare_compile { + CCACHE_VERSION=3.3.4 + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION}) + ${MASON_DIR}/mason install cmake 3.8.2 + ${MASON_DIR}/mason link cmake 3.8.2 + ${MASON_DIR}/mason install protozero 1.6.3 + ${MASON_DIR}/mason link protozero 1.6.3 + ${MASON_DIR}/mason install rapidjson 2016-07-20-369de87 + ${MASON_DIR}/mason link rapidjson 2016-07-20-369de87 + ${MASON_DIR}/mason install libosmium 2.14.2 + ${MASON_DIR}/mason link libosmium 2.14.2 + BOOST_VERSION=1.66.0 + ${MASON_DIR}/mason install boost ${BOOST_VERSION} + ${MASON_DIR}/mason link boost ${BOOST_VERSION} + ${MASON_DIR}/mason install boost_libprogram_options ${BOOST_VERSION} + ${MASON_DIR}/mason link boost_libprogram_options ${BOOST_VERSION} + ${MASON_DIR}/mason install zlib 1.2.8 + ${MASON_DIR}/mason link zlib 1.2.8 + ${MASON_DIR}/mason install expat 2.2.0 + ${MASON_DIR}/mason link expat 2.2.0 + ${MASON_DIR}/mason install bzip2 1.0.6 + ${MASON_DIR}/mason link bzip2 1.0.6 +} + +function mason_compile { + rm -rf build + mkdir -p build + cd build + CMAKE_PREFIX_PATH=${MASON_ROOT}/.link \ + ${MASON_ROOT}/.link/bin/cmake \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} \ + -DCMAKE_CXX_COMPILER_LAUNCHER="${MASON_CCACHE}/bin/ccache" \ + -DCMAKE_BUILD_TYPE=Release \ + -DBoost_NO_SYSTEM_PATHS=ON \ + -DBoost_USE_STATIC_LIBS=ON \ + .. 
+ # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + make VERBOSE=1 -j4 + else + make VERBOSE=1 -j${MASON_CONCURRENCY} + fi + make install + +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/osmium-tool/336eb45/.travis.yml b/scripts/osmium-tool/336eb45/.travis.yml new file mode 100644 index 000000000..ebbe5fc07 --- /dev/null +++ b/scripts/osmium-tool/336eb45/.travis.yml @@ -0,0 +1,21 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-5-dev + - pandoc + +script: +- if [[ $(uname -s) == 'Darwin' ]]; then brew install pandoc || true; fi; +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/osmium-tool/336eb45/script.sh b/scripts/osmium-tool/336eb45/script.sh new file mode 100755 index 000000000..14447a0d2 --- /dev/null +++ b/scripts/osmium-tool/336eb45/script.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash + +MASON_NAME=osmium-tool +MASON_VERSION=336eb45 +MASON_LIB_FILE=bin/osmium + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/osmcode/${MASON_NAME}/archive/${MASON_VERSION}.tar.gz \ + 8c093c86df7a7f7f599886208aded0d7bb16858d + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-336eb453ebe2f119b458af906d23635230294e2b +} + +function mason_prepare_compile { + CCACHE_VERSION=3.3.1 + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION}) + ${MASON_DIR}/mason install cmake 3.7.1 + ${MASON_DIR}/mason link cmake 3.7.1 + ${MASON_DIR}/mason install utfcpp 2.3.4 + ${MASON_DIR}/mason link utfcpp 2.3.4 + ${MASON_DIR}/mason install protozero 1.6.2 + ${MASON_DIR}/mason link protozero 1.6.2 + ${MASON_DIR}/mason install rapidjson 2016-07-20-369de87 + ${MASON_DIR}/mason link rapidjson 2016-07-20-369de87 + ${MASON_DIR}/mason install libosmium 2.14.0 + ${MASON_DIR}/mason link libosmium 2.14.0 + BOOST_VERSION=1.63.0 + ${MASON_DIR}/mason install boost ${BOOST_VERSION} + ${MASON_DIR}/mason link boost ${BOOST_VERSION} + ${MASON_DIR}/mason install boost_libprogram_options ${BOOST_VERSION} + ${MASON_DIR}/mason link boost_libprogram_options ${BOOST_VERSION} + ${MASON_DIR}/mason install zlib 1.2.8 + ${MASON_DIR}/mason link zlib 1.2.8 + ${MASON_DIR}/mason install expat 2.2.0 + ${MASON_DIR}/mason link expat 2.2.0 + ${MASON_DIR}/mason install bzip2 1.0.6 + ${MASON_DIR}/mason link bzip2 1.0.6 +} + +function mason_compile { + rm -rf build + mkdir -p build + cd build + CMAKE_PREFIX_PATH=${MASON_ROOT}/.link \ + ${MASON_ROOT}/.link/bin/cmake \ + -DCMAKE_INSTALL_PREFIX=${MASON_PREFIX} \ + -DCMAKE_CXX_COMPILER_LAUNCHER="${MASON_CCACHE}/bin/ccache" \ + -DCMAKE_BUILD_TYPE=Release \ + -DBoost_NO_SYSTEM_PATHS=ON \ + -DBoost_USE_STATIC_LIBS=ON \ + .. 
+ # limit concurrency on travis to avoid heavy jobs being killed + if [[ ${TRAVIS_OS_NAME:-} ]]; then + make VERBOSE=1 -j4 + else + make VERBOSE=1 -j${MASON_CONCURRENCY} + fi + make install + +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/parallel/20160422/script.sh b/scripts/parallel/20160422/script.sh index f49c95218..aaf51eb5a 100755 --- a/scripts/parallel/20160422/script.sh +++ b/scripts/parallel/20160422/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/parallel function mason_load_source { mason_download \ - http://ftp.gnu.org/gnu/${MASON_NAME}/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ + https://ftp.gnu.org/gnu/${MASON_NAME}/${MASON_NAME}-${MASON_VERSION}.tar.bz2 \ 032c35aaecc65aa1298b33c48f0a4418041771e4 mason_extract_tar_bz2 diff --git a/scripts/perf/4.15.15/.travis.yml b/scripts/perf/4.15.15/.travis.yml new file mode 100644 index 000000000..9c5a0f11b --- /dev/null +++ b/scripts/perf/4.15.15/.travis.yml @@ -0,0 +1,28 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + # apt: + # make systemtap-sdt-dev bison flex libperl-dev + # yum: + # make flex bison elfutils-libelf-devel elfutils-devel libunwind-devel xz-devel numactl-devel openssl-devel slang-devel gtk2-devel perl-ExtUtils-Embed python-devel binutils-devel audit-libs-devel + packages: + - bison + - flex + - g++-4.9 + - systemtap-sdt-dev + +# note: perf must be compiled with gcc (see script.sh) +install: + - export CXX=g++-4.9 + - export CC=gcc-4.9 + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/perf/4.15.15/readme.md b/scripts/perf/4.15.15/readme.md new file mode 100644 index 000000000..a5e4ce375 --- /dev/null +++ b/scripts/perf/4.15.15/readme.md @@ -0,0 +1,52 @@ +This is the expected/enabled features of this perf build: + +Taken from 
https://travis-ci.org/mapbox/mason/builds/336964403#L613 + +Auto-detecting system features: +... dwarf: [ on ] +... dwarf_getlocations: [ on ] +... glibc: [ on ] +... gtk2: [ on ] +... libaudit: [ OFF ] +... libbfd: [ on ] +... libelf: [ on ] +... libnuma: [ OFF ] +... numa_num_possible_cpus: [ OFF ] +... libperl: [ OFF ] +... libpython: [ on ] +... libslang: [ on ] +... libcrypto: [ on ] +... libunwind: [ OFF ] +... libdw-dwarf-unwind: [ on ] +... zlib: [ on ] +... lzma: [ on ] +... get_cpuid: [ on ] +... bpf: [ on ] +... backtrace: [ on ] +... fortify-source: [ on ] +... sync-compare-and-swap: [ on ] +... gtk2-infobar: [ on ] +... libelf-getphdrnum: [ on ] +... libelf-gelf_getnote: [ on ] +... libelf-getshdrstrndx: [ on ] +... libelf-mmap: [ on ] +... libpython-version: [ on ] +... libunwind-x86: [ OFF ] +... libunwind-x86_64: [ OFF ] +... libunwind-arm: [ OFF ] +... libunwind-aarch64: [ OFF ] +... pthread-attr-setaffinity-np: [ on ] +... stackprotector-all: [ on ] +... timerfd: [ on ] +... sched_getcpu: [ on ] +... sdt: [ on ] +... setns: [ on ] +Makefile.config:613: Python support disabled by user +... prefix: /home/travis/build/mapbox/mason/mason_packages/linux-x86_64/perf/4.15.15 +... bindir: /home/travis/build/mapbox/mason/mason_packages/linux-x86_64/perf/4.15.15/bin +... libdir: /home/travis/build/mapbox/mason/mason_packages/linux-x86_64/perf/4.15.15/lib64 +... sysconfdir: /home/travis/build/mapbox/mason/mason_packages/linux-x86_64/perf/4.15.15/etc +... LIBUNWIND_DIR: +... LIBDW_DIR: +... JDIR: /usr/lib/jvm/java-1.6.0-openjdk-amd64 +... DWARF post unwind library: libdw \ No newline at end of file diff --git a/scripts/perf/4.15.15/script.sh b/scripts/perf/4.15.15/script.sh new file mode 100755 index 000000000..6c52cb48c --- /dev/null +++ b/scripts/perf/4.15.15/script.sh @@ -0,0 +1,82 @@ +#!/usr/bin/env bash + +MASON_NAME=perf +MASON_VERSION=4.15.15 +MASON_LIB_FILE=bin/perf + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + # https://www.kernel.org/ + # https://git.kernel.org/cgit/linux/kernel/git/stable/linux-stable.git/log/tools/perf + mason_download \ + https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-${MASON_VERSION}.tar.xz \ + 49e83b508ceb634f20d2663ed5028c41d0a5f39a + + mason_extract_tar_xz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/linux-${MASON_VERSION} +} + +function mason_prepare_compile { + ${MASON_DIR}/mason install zlib 1.2.8 + MASON_ZLIB=$(${MASON_DIR}/mason prefix zlib 1.2.8) + ${MASON_DIR}/mason install xz 5.2.3 + MASON_XZ=$(${MASON_DIR}/mason prefix xz 5.2.3) + ${MASON_DIR}/mason install binutils 2.30 + MASON_BINUTILS=$(${MASON_DIR}/mason prefix binutils 2.30) + ${MASON_DIR}/mason install slang 2.3.1 + MASON_SLANG=$(${MASON_DIR}/mason prefix slang 2.3.1) + ${MASON_DIR}/mason install bzip2 1.0.6 + MASON_BZIP2=$(${MASON_DIR}/mason prefix bzip2 1.0.6) + ${MASON_DIR}/mason install elfutils 0.170 + MASON_ELFUTILS=$(${MASON_DIR}/mason prefix elfutils 0.170) + EXTRA_CFLAGS="-m64 -I${MASON_SLANG}/include -I${MASON_ZLIB}/include -I${MASON_XZ}/include -I${MASON_BINUTILS}/include -I${MASON_BZIP2}/include -I${MASON_ELFUTILS}/include" + EXTRA_LDFLAGS="-L${MASON_BZIP2}/lib -L${MASON_ZLIB}/lib -L${MASON_XZ}/lib -L${MASON_SLANG}/lib -L${MASON_ELFUTILS}/lib -L${MASON_BINUTILS}/lib" +} + +# https://perf.wiki.kernel.org/index.php/Jolsa_Howto_Install_Sources +# https://askubuntu.com/questions/50145/how-to-install-perf-monitoring-tool/306683 +# https://www.spinics.net/lists/linux-perf-users/msg03040.html +# https://software.intel.com/en-us/articles/linux-perf-for-intel-vtune-Amplifier-XE +# see the readme.md in this directory for a log of what perf features are enabled +function mason_compile { + cd tools/perf + # we set NO_LIBUNWIND since libdw is used from elfutils which is faster: https://lwn.net/Articles/579508/ + # note: LIBELF is needed for symbols + node --perf_basic_prof_only_functions + mkdir -p output-dir + rm -rf 
output-dir/* + make \ + O=output-dir \ + LIBDW_LDFLAGS="-L${MASON_ELFUTILS}/lib -Wl,--start-group -ldw -lelf -lebl -llzma -lz -lbz2 -ldl -L${MASON_BZIP2}/lib -L${MASON_XZ}/lib" \ + LIBDW_CFLAGS="-I${MASON_ELFUTILS}/include/" \ + V=1 VF=1 \ + prefix=${MASON_PREFIX} \ + NO_LIBNUMA=1 \ + NO_LIBAUDIT=1 \ + NO_LIBUNWIND=1 \ + NO_BIONIC=1 \ + NO_BACKTRACE=1 \ + NO_LIBCRYPTO=1 \ + NO_LIBPERL=1 \ + NO_GTK2=1 \ + LDFLAGS="${EXTRA_LDFLAGS} -Wl,--start-group -L${MASON_BINUTILS}/lib -lbfd -lopcodes -lelf -lz" \ + NO_LIBPYTHON=1 \ + WERROR=0 \ + EXTRA_CFLAGS="${EXTRA_CFLAGS}" \ + install +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/perf/4.16/.travis.yml b/scripts/perf/4.16/.travis.yml new file mode 100644 index 000000000..e978963f7 --- /dev/null +++ b/scripts/perf/4.16/.travis.yml @@ -0,0 +1,29 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + dist: trusty + addons: + apt: + sources: + - ubuntu-toolchain-r-test + # apt: + # make systemtap-sdt-dev bison flex libperl-dev + # yum: + # make flex bison elfutils-libelf-devel elfutils-devel libunwind-devel xz-devel numactl-devel openssl-devel slang-devel gtk2-devel perl-ExtUtils-Embed python-devel binutils-devel audit-libs-devel + packages: + - bison + - flex + - g++-4.9 + - systemtap-sdt-dev + +# note: perf must be compiled with gcc (see script.sh) +install: + - export CXX=g++-4.9 + - export CC=gcc-4.9 + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/perf/4.16/readme.md b/scripts/perf/4.16/readme.md new file mode 100644 index 000000000..ced489175 --- /dev/null +++ b/scripts/perf/4.16/readme.md @@ -0,0 +1,51 @@ +This is the expected/enabled features of this perf build: + +Taken from https://travis-ci.org/mapbox/mason/builds/336964403#L613 + +... dwarf: [ on ] +... dwarf_getlocations: [ on ] +... glibc: [ on ] +... 
gtk2: [ OFF ] +... libaudit: [ OFF ] +... libbfd: [ on ] +... libelf: [ on ] +... libnuma: [ OFF ] +... numa_num_possible_cpus: [ OFF ] +... libperl: [ OFF ] +... libpython: [ on ] +... libslang: [ on ] +... libcrypto: [ on ] +... libunwind: [ OFF ] +... libdw-dwarf-unwind: [ on ] +... zlib: [ on ] +... lzma: [ on ] +... get_cpuid: [ on ] +... bpf: [ on ] +... backtrace: [ on ] +... fortify-source: [ on ] +... sync-compare-and-swap: [ on ] +... gtk2-infobar: [ OFF ] +... libelf-getphdrnum: [ on ] +... libelf-gelf_getnote: [ on ] +... libelf-getshdrstrndx: [ on ] +... libelf-mmap: [ on ] +... libpython-version: [ on ] +... libunwind-x86: [ OFF ] +... libunwind-x86_64: [ OFF ] +... libunwind-arm: [ OFF ] +... libunwind-aarch64: [ OFF ] +... pthread-attr-setaffinity-np: [ on ] +... stackprotector-all: [ on ] +... timerfd: [ on ] +... sched_getcpu: [ on ] +... sdt: [ on ] +... setns: [ on ] +Makefile.config:613: Python support disabled by user +... prefix: /home/travis/build/mapbox/mason/mason_packages/linux-x86_64/perf/4.15 +... bindir: /home/travis/build/mapbox/mason/mason_packages/linux-x86_64/perf/4.15/bin +... libdir: /home/travis/build/mapbox/mason/mason_packages/linux-x86_64/perf/4.15/lib64 +... sysconfdir: /home/travis/build/mapbox/mason/mason_packages/linux-x86_64/perf/4.15/etc +... LIBUNWIND_DIR: +... LIBDW_DIR: +... JDIR: /usr/lib/jvm/java-1.7.0-openjdk-amd64 +... DWARF post unwind library: libdw \ No newline at end of file diff --git a/scripts/perf/4.16/script.sh b/scripts/perf/4.16/script.sh new file mode 100755 index 000000000..152657d15 --- /dev/null +++ b/scripts/perf/4.16/script.sh @@ -0,0 +1,82 @@ +#!/usr/bin/env bash + +MASON_NAME=perf +MASON_VERSION=4.16 +MASON_LIB_FILE=bin/perf + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + # https://www.kernel.org/ + # https://git.kernel.org/cgit/linux/kernel/git/stable/linux-stable.git/log/tools/perf + mason_download \ + https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-${MASON_VERSION}.tar.xz \ + 86ade902aa77bf25b8a49c3a82e02ea82891b5fd + + mason_extract_tar_xz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/linux-${MASON_VERSION} +} + +function mason_prepare_compile { + ${MASON_DIR}/mason install zlib 1.2.8 + MASON_ZLIB=$(${MASON_DIR}/mason prefix zlib 1.2.8) + ${MASON_DIR}/mason install xz 5.2.3 + MASON_XZ=$(${MASON_DIR}/mason prefix xz 5.2.3) + ${MASON_DIR}/mason install binutils 2.30 + MASON_BINUTILS=$(${MASON_DIR}/mason prefix binutils 2.30) + ${MASON_DIR}/mason install slang 2.3.1 + MASON_SLANG=$(${MASON_DIR}/mason prefix slang 2.3.1) + ${MASON_DIR}/mason install bzip2 1.0.6 + MASON_BZIP2=$(${MASON_DIR}/mason prefix bzip2 1.0.6) + ${MASON_DIR}/mason install elfutils 0.170 + MASON_ELFUTILS=$(${MASON_DIR}/mason prefix elfutils 0.170) + EXTRA_CFLAGS="-m64 -I${MASON_SLANG}/include -I${MASON_ZLIB}/include -I${MASON_XZ}/include -I${MASON_BINUTILS}/include -I${MASON_BZIP2}/include -I${MASON_ELFUTILS}/include" + EXTRA_LDFLAGS="-L${MASON_BZIP2}/lib -L${MASON_ZLIB}/lib -L${MASON_XZ}/lib -L${MASON_SLANG}/lib -L${MASON_ELFUTILS}/lib -L${MASON_BINUTILS}/lib" +} + +# https://perf.wiki.kernel.org/index.php/Jolsa_Howto_Install_Sources +# https://askubuntu.com/questions/50145/how-to-install-perf-monitoring-tool/306683 +# https://www.spinics.net/lists/linux-perf-users/msg03040.html +# https://software.intel.com/en-us/articles/linux-perf-for-intel-vtune-Amplifier-XE +# see the readme.md in this directory for a log of what perf features are enabled +function mason_compile { + cd tools/perf + # we set NO_LIBUNWIND since libdw is used from elfutils which is faster: https://lwn.net/Articles/579508/ + # note: LIBELF is needed for symbols + node --perf_basic_prof_only_functions + mkdir -p output-dir + rm -rf 
output-dir/* + make \ + O=output-dir \ + LIBDW_LDFLAGS="-L${MASON_ELFUTILS}/lib -Wl,--start-group -ldw -lelf -lebl -llzma -lz -lbz2 -ldl -L${MASON_BZIP2}/lib -L${MASON_XZ}/lib" \ + LIBDW_CFLAGS="-I${MASON_ELFUTILS}/include/" \ + V=1 VF=1 \ + prefix=${MASON_PREFIX} \ + NO_LIBNUMA=1 \ + NO_LIBAUDIT=1 \ + NO_LIBUNWIND=1 \ + NO_BIONIC=1 \ + NO_BACKTRACE=1 \ + NO_LIBCRYPTO=1 \ + NO_LIBPERL=1 \ + NO_GTK2=1 \ + LDFLAGS="${EXTRA_LDFLAGS} -Wl,--start-group -L${MASON_BINUTILS}/lib -lbfd -lopcodes -lelf -lz" \ + NO_LIBPYTHON=1 \ + WERROR=0 \ + EXTRA_CFLAGS="${EXTRA_CFLAGS}" \ + install +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/pixman/0.32.6/script.sh b/scripts/pixman/0.32.6/script.sh index 757560407..af7af35a0 100755 --- a/scripts/pixman/0.32.6/script.sh +++ b/scripts/pixman/0.32.6/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/pixman-1.pc function mason_load_source { mason_download \ - http://cairographics.org/releases/pixman-${MASON_VERSION}.tar.gz \ + https://cairographics.org/releases/pixman-${MASON_VERSION}.tar.gz \ ef6a79a704290fa28838d02faad3914fe9cbc895 mason_extract_tar_gz diff --git a/scripts/pixman/0.34.0/script.sh b/scripts/pixman/0.34.0/script.sh index 2d20f626e..5c1148703 100755 --- a/scripts/pixman/0.34.0/script.sh +++ b/scripts/pixman/0.34.0/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/pixman-1.pc function mason_load_source { mason_download \ - http://cairographics.org/releases/pixman-${MASON_VERSION}.tar.gz \ + https://cairographics.org/releases/pixman-${MASON_VERSION}.tar.gz \ 022e9e5856f4c5a8c9bdea3996c6b199683fce78 mason_extract_tar_gz diff --git a/scripts/postgis/2.2.2/script.sh b/scripts/postgis/2.2.2/script.sh index 28c1a4592..6c8ba8180 100755 --- a/scripts/postgis/2.2.2/script.sh +++ b/scripts/postgis/2.2.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/shp2pgsql function mason_load_source { mason_download \ - 
http://download.osgeo.org/postgis/source/postgis-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/postgis/source/postgis-${MASON_VERSION}.tar.gz \ e3a740fc6d9af5d567346f2729ee86af2b6da88c mason_extract_tar_gz diff --git a/scripts/postgis/2.2.2/test.sh b/scripts/postgis/2.2.2/test.sh deleted file mode 100755 index 6a92e0697..000000000 --- a/scripts/postgis/2.2.2/test.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env bash - -set -ue - -if [[ ${PGDATA:-unset} != "unset" ]] || [[ ${PGHOST:-unset} != "unset" ]] || [[ ${PGTEMP_DIR:-unset} != "unset" ]]; then - echo "ERROR: this script deletes \${PGDATA}, \${PGHOST}, and \${PGTEMP_DIR}." - echo "So it will not run if you have these set in your environment" - exit 1 -fi - -export GDAL_PREFIX=$(../../../mason prefix gdal 2.0.2) -# make sure we can init, start, create db, and stop -export PGDATA=./local-postgres -# PGHOST must start with / so therefore must be absolute path -export PGHOST=$(pwd)/local-unix-socket -export PGTEMP_DIR=$(pwd)/local-tmp -export PGPORT=1111 - -# cleanup -function cleanup() { - if [[ -d ${PGDATA} ]]; then rm -r ${PGDATA}; fi - if [[ -d ${PGTEMP_DIR} ]]; then rm -r ${PGTEMP_DIR}; fi - if [[ -d ${PGHOST} ]]; then rm -r ${PGHOST}; fi - rm -f postgres.log - rm -f seattle_washington_water_coast* - rm -f seattle_washington.water.coast* -} - -function setup() { - mkdir ${PGTEMP_DIR} - mkdir ${PGHOST} -} - -function finish { - ./mason_packages/.link/bin/pg_ctl -w stop - cleanup -} - -function pause(){ - read -p "$*" -} - -trap finish EXIT - -cleanup -setup - -if [[ ! 
-d ./mason_packages/.link ]]; then - ./script.sh link -fi - -./mason_packages/.link/bin/initdb -export PATH=./mason_packages/.link/bin/:${PATH} -export GDAL_DATA=${GDAL_PREFIX}/share/gdal -postgres -k $PGHOST > postgres.log & -sleep 2 -cat postgres.log -createdb template_postgis -psql -l -psql template_postgis -c "CREATE TABLESPACE temp_disk LOCATION '${PGTEMP_DIR}';" -psql template_postgis -c "SET temp_tablespaces TO 'temp_disk';" -psql template_postgis -c "CREATE EXTENSION postgis;" -psql template_postgis -c "SELECT PostGIS_Full_Version();" -curl -OL "https://s3.amazonaws.com/metro-extracts.mapzen.com/seattle_washington.water.coastline.zip" -unzip -o seattle_washington.water.coastline.zip -createdb test-osm -T template_postgis -shp2pgsql -s 4326 seattle_washington_water_coast.shp coast | psql test-osm -psql test-osm -c "SELECT count(*) from coast;" \ No newline at end of file diff --git a/scripts/postgis/2.3.2-1/script.sh b/scripts/postgis/2.3.2-1/script.sh index 45a7cd767..a3f9d1e23 100755 --- a/scripts/postgis/2.3.2-1/script.sh +++ b/scripts/postgis/2.3.2-1/script.sh @@ -9,7 +9,7 @@ MASON_LIB_FILE=bin/shp2pgsql function mason_load_source { mason_download \ - http://download.osgeo.org/postgis/source/postgis-${MASON_VERSION2}.tar.gz \ + https://download.osgeo.org/postgis/source/postgis-${MASON_VERSION2}.tar.gz \ 1afe92b14c9329f5ce5cc6a5dbe42575449d508e mason_extract_tar_gz diff --git a/scripts/postgis/2.3.2-1/test.sh b/scripts/postgis/2.3.2-1/test.sh deleted file mode 100755 index 72fdded88..000000000 --- a/scripts/postgis/2.3.2-1/test.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env bash - -set -ue - -: ' -Assumes gdal and postgis have been linked - -' - -if [[ ${PGDATA:-unset} != "unset" ]] || [[ ${PGHOST:-unset} != "unset" ]] || [[ ${PGTEMP_DIR:-unset} != "unset" ]]; then - echo "ERROR: this script deletes \${PGDATA}, \${PGHOST}, and \${PGTEMP_DIR}." 
- echo "So it will not run if you have these set in your environment" - exit 1 -fi - -# make sure we can init, start, create db, and stop -export PGDATA=./local-postgres -# PGHOST must start with / so therefore must be absolute path -export PGHOST=$(pwd)/local-unix-socket -export PGTEMP_DIR=$(pwd)/local-tmp -export PGPORT=1111 - -# cleanup -function cleanup() { - if [[ -d ${PGDATA} ]]; then rm -r ${PGDATA}; fi - if [[ -d ${PGTEMP_DIR} ]]; then rm -r ${PGTEMP_DIR}; fi - if [[ -d ${PGHOST} ]]; then rm -r ${PGHOST}; fi - rm -f postgres.log - rm -f seattle_washington_water_coast* - rm -f seattle_washington.water.coast* -} - -function setup() { - mkdir ${PGTEMP_DIR} - mkdir ${PGHOST} -} - -function finish { - ./mason_packages/.link/bin/pg_ctl -w stop - cleanup -} - -trap finish EXIT - -cleanup -setup - -./mason_packages/.link/bin/initdb -export PATH=./mason_packages/.link/bin/:${PATH} -# must be absolute -export GDAL_DATA=$(pwd)/mason_packages/.link/share/gdal -postgres -k $PGHOST > postgres.log & -sleep 2 -cat postgres.log -createdb template_postgis -psql -l -psql template_postgis -c "CREATE TABLESPACE temp_disk LOCATION '${PGTEMP_DIR}';" -psql template_postgis -c "SET temp_tablespaces TO 'temp_disk';" -psql template_postgis -c "CREATE EXTENSION postgis;" -psql template_postgis -c "SELECT PostGIS_Full_Version();" -curl -OL "https://s3.amazonaws.com/metro-extracts.mapzen.com/seattle_washington.water.coastline.zip" -unzip -o seattle_washington.water.coastline.zip -createdb test-osm -T template_postgis -shp2pgsql -s 4326 seattle_washington_water_coast.shp coast | psql test-osm -psql test-osm -c "SELECT count(*) from coast;" \ No newline at end of file diff --git a/scripts/postgis/2.3.2/script.sh b/scripts/postgis/2.3.2/script.sh index 87b1fcced..e52cbf5e3 100755 --- a/scripts/postgis/2.3.2/script.sh +++ b/scripts/postgis/2.3.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/shp2pgsql function mason_load_source { mason_download \ - 
http://download.osgeo.org/postgis/source/postgis-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/postgis/source/postgis-${MASON_VERSION}.tar.gz \ 1afe92b14c9329f5ce5cc6a5dbe42575449d508e mason_extract_tar_gz diff --git a/scripts/postgis/2.3.2/test.sh b/scripts/postgis/2.3.2/test.sh deleted file mode 100755 index 72fdded88..000000000 --- a/scripts/postgis/2.3.2/test.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env bash - -set -ue - -: ' -Assumes gdal and postgis have been linked - -' - -if [[ ${PGDATA:-unset} != "unset" ]] || [[ ${PGHOST:-unset} != "unset" ]] || [[ ${PGTEMP_DIR:-unset} != "unset" ]]; then - echo "ERROR: this script deletes \${PGDATA}, \${PGHOST}, and \${PGTEMP_DIR}." - echo "So it will not run if you have these set in your environment" - exit 1 -fi - -# make sure we can init, start, create db, and stop -export PGDATA=./local-postgres -# PGHOST must start with / so therefore must be absolute path -export PGHOST=$(pwd)/local-unix-socket -export PGTEMP_DIR=$(pwd)/local-tmp -export PGPORT=1111 - -# cleanup -function cleanup() { - if [[ -d ${PGDATA} ]]; then rm -r ${PGDATA}; fi - if [[ -d ${PGTEMP_DIR} ]]; then rm -r ${PGTEMP_DIR}; fi - if [[ -d ${PGHOST} ]]; then rm -r ${PGHOST}; fi - rm -f postgres.log - rm -f seattle_washington_water_coast* - rm -f seattle_washington.water.coast* -} - -function setup() { - mkdir ${PGTEMP_DIR} - mkdir ${PGHOST} -} - -function finish { - ./mason_packages/.link/bin/pg_ctl -w stop - cleanup -} - -trap finish EXIT - -cleanup -setup - -./mason_packages/.link/bin/initdb -export PATH=./mason_packages/.link/bin/:${PATH} -# must be absolute -export GDAL_DATA=$(pwd)/mason_packages/.link/share/gdal -postgres -k $PGHOST > postgres.log & -sleep 2 -cat postgres.log -createdb template_postgis -psql -l -psql template_postgis -c "CREATE TABLESPACE temp_disk LOCATION '${PGTEMP_DIR}';" -psql template_postgis -c "SET temp_tablespaces TO 'temp_disk';" -psql template_postgis -c "CREATE EXTENSION postgis;" -psql template_postgis -c 
"SELECT PostGIS_Full_Version();" -curl -OL "https://s3.amazonaws.com/metro-extracts.mapzen.com/seattle_washington.water.coastline.zip" -unzip -o seattle_washington.water.coastline.zip -createdb test-osm -T template_postgis -shp2pgsql -s 4326 seattle_washington_water_coast.shp coast | psql test-osm -psql test-osm -c "SELECT count(*) from coast;" \ No newline at end of file diff --git a/scripts/postgis/2.4.0/script.sh b/scripts/postgis/2.4.0/script.sh index 1820fbe59..2de4c63a8 100755 --- a/scripts/postgis/2.4.0/script.sh +++ b/scripts/postgis/2.4.0/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/shp2pgsql function mason_load_source { mason_download \ - http://download.osgeo.org/postgis/source/postgis-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/postgis/source/postgis-${MASON_VERSION}.tar.gz \ 70363fffe2eedfcd6fd24908090f66abc2acb9a5 mason_extract_tar_gz diff --git a/scripts/postgis/2.4.0/test.sh b/scripts/postgis/2.4.0/test.sh deleted file mode 100755 index 5a9b8da5d..000000000 --- a/scripts/postgis/2.4.0/test.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env bash - -set -ue - -: ' -Assumes gdal and postgis have been linked - -' - -if [[ ${PGDATA:-unset} != "unset" ]] || [[ ${PGHOST:-unset} != "unset" ]] || [[ ${PGTEMP_DIR:-unset} != "unset" ]]; then - echo "ERROR: this script deletes \${PGDATA}, \${PGHOST}, and \${PGTEMP_DIR}." 
- echo "So it will not run if you have these set in your environment" - exit 1 -fi - -# make sure we can init, start, create db, and stop -export PGDATA=./local-postgres -# PGHOST must start with / so therefore must be absolute path -export PGHOST=$(pwd)/local-unix-socket -export PGTEMP_DIR=$(pwd)/local-tmp -export PGPORT=1111 - -# cleanup -function cleanup() { - if [[ -d ${PGDATA} ]]; then rm -r ${PGDATA}; fi - if [[ -d ${PGTEMP_DIR} ]]; then rm -r ${PGTEMP_DIR}; fi - if [[ -d ${PGHOST} ]]; then rm -r ${PGHOST}; fi - rm -f postgres.log - rm -f seattle_washington_water_coast* - rm -f seattle_washington.water.coast* -} - -function setup() { - mkdir ${PGTEMP_DIR} - mkdir ${PGHOST} -} - -function finish { - ./mason_packages/.link/bin/pg_ctl -w stop - cleanup -} - -trap finish EXIT - -cleanup -setup - -./mason_packages/.link/bin/initdb -export PATH=./mason_packages/.link/bin/:${PATH} -# must be absolute -export GDAL_DATA=$(pwd)/mason_packages/.link/share/gdal -postgres -k $PGHOST > postgres.log & -sleep 2 -cat postgres.log -createdb template_postgis -psql -l -psql template_postgis -c "CREATE TABLESPACE temp_disk LOCATION '${PGTEMP_DIR}';" -psql template_postgis -c "SET temp_tablespaces TO 'temp_disk';" -psql template_postgis -c "CREATE EXTENSION postgis;" -psql template_postgis -c "SELECT PostGIS_Full_Version();" -psql template_postgis -c "SELECT ST_AsGeoJSON(ST_GeomFromGeoJSON('{ \"type\": \"Point\", \"coordinates\": [0,0] }'));" -curl -OL "https://s3.amazonaws.com/metro-extracts.mapzen.com/seattle_washington.water.coastline.zip" -unzip -o seattle_washington.water.coastline.zip -createdb test-osm -T template_postgis -shp2pgsql -s 4326 seattle_washington_water_coast.shp coast | psql test-osm -psql test-osm -c "SELECT count(*) from coast;" \ No newline at end of file diff --git a/scripts/postgis/2.4.1/script.sh b/scripts/postgis/2.4.1/script.sh index a3aaee092..d6ae8a4c3 100755 --- a/scripts/postgis/2.4.1/script.sh +++ b/scripts/postgis/2.4.1/script.sh @@ -8,7 +8,7 @@ 
MASON_LIB_FILE=bin/shp2pgsql function mason_load_source { mason_download \ - http://download.osgeo.org/postgis/source/postgis-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/postgis/source/postgis-${MASON_VERSION}.tar.gz \ 2c4bcef6872fe09604bfd10655c136a4e96a528b mason_extract_tar_gz diff --git a/scripts/postgis/2.4.1/test.sh b/scripts/postgis/2.4.1/test.sh deleted file mode 100755 index 5a9b8da5d..000000000 --- a/scripts/postgis/2.4.1/test.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env bash - -set -ue - -: ' -Assumes gdal and postgis have been linked - -' - -if [[ ${PGDATA:-unset} != "unset" ]] || [[ ${PGHOST:-unset} != "unset" ]] || [[ ${PGTEMP_DIR:-unset} != "unset" ]]; then - echo "ERROR: this script deletes \${PGDATA}, \${PGHOST}, and \${PGTEMP_DIR}." - echo "So it will not run if you have these set in your environment" - exit 1 -fi - -# make sure we can init, start, create db, and stop -export PGDATA=./local-postgres -# PGHOST must start with / so therefore must be absolute path -export PGHOST=$(pwd)/local-unix-socket -export PGTEMP_DIR=$(pwd)/local-tmp -export PGPORT=1111 - -# cleanup -function cleanup() { - if [[ -d ${PGDATA} ]]; then rm -r ${PGDATA}; fi - if [[ -d ${PGTEMP_DIR} ]]; then rm -r ${PGTEMP_DIR}; fi - if [[ -d ${PGHOST} ]]; then rm -r ${PGHOST}; fi - rm -f postgres.log - rm -f seattle_washington_water_coast* - rm -f seattle_washington.water.coast* -} - -function setup() { - mkdir ${PGTEMP_DIR} - mkdir ${PGHOST} -} - -function finish { - ./mason_packages/.link/bin/pg_ctl -w stop - cleanup -} - -trap finish EXIT - -cleanup -setup - -./mason_packages/.link/bin/initdb -export PATH=./mason_packages/.link/bin/:${PATH} -# must be absolute -export GDAL_DATA=$(pwd)/mason_packages/.link/share/gdal -postgres -k $PGHOST > postgres.log & -sleep 2 -cat postgres.log -createdb template_postgis -psql -l -psql template_postgis -c "CREATE TABLESPACE temp_disk LOCATION '${PGTEMP_DIR}';" -psql template_postgis -c "SET temp_tablespaces TO 'temp_disk';" 
-psql template_postgis -c "CREATE EXTENSION postgis;" -psql template_postgis -c "SELECT PostGIS_Full_Version();" -psql template_postgis -c "SELECT ST_AsGeoJSON(ST_GeomFromGeoJSON('{ \"type\": \"Point\", \"coordinates\": [0,0] }'));" -curl -OL "https://s3.amazonaws.com/metro-extracts.mapzen.com/seattle_washington.water.coastline.zip" -unzip -o seattle_washington.water.coastline.zip -createdb test-osm -T template_postgis -shp2pgsql -s 4326 seattle_washington_water_coast.shp coast | psql test-osm -psql test-osm -c "SELECT count(*) from coast;" \ No newline at end of file diff --git a/scripts/postgis/2.5.2/.travis.yml b/scripts/postgis/2.5.2/.travis.yml new file mode 100644 index 000000000..ca922876b --- /dev/null +++ b/scripts/postgis/2.5.2/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/postgis/2.5.2/script.sh b/scripts/postgis/2.5.2/script.sh new file mode 100755 index 000000000..e9cb2e917 --- /dev/null +++ b/scripts/postgis/2.5.2/script.sh @@ -0,0 +1,195 @@ +#!/usr/bin/env bash + +MASON_NAME=postgis +MASON_VERSION=2.5.2 +MASON_LIB_FILE=bin/shp2pgsql + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://download.osgeo.org/postgis/source/postgis-${MASON_VERSION}.tar.gz \ + e2e23b177f889a8f00c67737ac80180abed0f83d + + mason_extract_tar_gz + export MASON_BUILD_PATH=${MASON_ROOT}/.build/postgis-${MASON_VERSION} +} + +function mason_prepare_compile { + # This line is critical: it ensures that we install deps in + # the parent folder rather than within the ./build directory + # such that our modifications to the .la files work + cd $(dirname ${MASON_ROOT}) + # set up to fix libtool .la files + # https://github.com/mapbox/mason/issues/61 + if [[ $(uname -s) == 'Darwin' ]]; then + FIND="\/Users\/travis\/build\/mapbox\/mason" + else + FIND="\/home\/travis\/build\/mapbox\/mason" + fi + REPLACE="$(pwd)" + REPLACE=${REPLACE////\\/} + LIBTIFF_VERSION="4.0.8" + PROJ_VERSION="4.9.3" + JPEG_VERSION="1.5.2" + PNG_VERSION="1.6.32" + EXPAT_VERSION="2.2.4" + POSTGRES_VERSION="10.3" + XML2_VERSION="2.9.4" + GEOS_VERSION="3.6.2" + GDAL_VERSION="2.4.1" + SQLITE_VERSION="3.21.0" + JSON_C_VERSION="0.12.1" + PROTOBUF_VERSION="3.4.1" # must match the version compiled into protobuf C + PROTOBUF_C_VERSION="1.3.0" + ${MASON_DIR}/mason install postgres ${POSTGRES_VERSION} + MASON_POSTGRES=$(${MASON_DIR}/mason prefix postgres ${POSTGRES_VERSION}) + ${MASON_DIR}/mason install libxml2 ${XML2_VERSION} + MASON_XML2=$(${MASON_DIR}/mason prefix libxml2 ${XML2_VERSION}) + ${MASON_DIR}/mason install geos ${GEOS_VERSION} + MASON_GEOS=$(${MASON_DIR}/mason prefix geos ${GEOS_VERSION}) + ${MASON_DIR}/mason install libtiff ${LIBTIFF_VERSION} + MASON_TIFF=$(${MASON_DIR}/mason prefix libtiff ${LIBTIFF_VERSION}) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_TIFF}/lib/libtiff.la + ${MASON_DIR}/mason install proj ${PROJ_VERSION} + MASON_PROJ=$(${MASON_DIR}/mason prefix proj ${PROJ_VERSION}) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_PROJ}/lib/libproj.la + ${MASON_DIR}/mason install jpeg_turbo ${JPEG_VERSION} + 
MASON_JPEG=$(${MASON_DIR}/mason prefix jpeg_turbo ${JPEG_VERSION}) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_JPEG}/lib/libjpeg.la + ${MASON_DIR}/mason install libpng ${PNG_VERSION} + MASON_PNG=$(${MASON_DIR}/mason prefix libpng ${PNG_VERSION}) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_PNG}/lib/libpng.la + ${MASON_DIR}/mason install expat ${EXPAT_VERSION} + MASON_EXPAT=$(${MASON_DIR}/mason prefix expat ${EXPAT_VERSION}) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_EXPAT}/lib/libexpat.la + ${MASON_DIR}/mason install json-c ${JSON_C_VERSION} + MASON_JSON_C=$(${MASON_DIR}/mason prefix json-c ${JSON_C_VERSION}) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_JSON_C}/lib/libjson-c.la + ${MASON_DIR}/mason install protobuf_c ${PROTOBUF_C_VERSION} + MASON_PROTOBUF_C=$(${MASON_DIR}/mason prefix protobuf_c ${PROTOBUF_C_VERSION}) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_PROTOBUF_C}/lib/libprotobuf-c.la + ${MASON_DIR}/mason install protobuf ${PROTOBUF_VERSION} + MASON_PROTOBUF=$(${MASON_DIR}/mason prefix protobuf ${PROTOBUF_VERSION}) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_PROTOBUF}/lib/libprotobuf-lite.la + ${MASON_DIR}/mason install libpq ${POSTGRES_VERSION} + MASON_LIBPQ=$(${MASON_DIR}/mason prefix libpq ${POSTGRES_VERSION}) + ${MASON_DIR}/mason install zlib system + MASON_ZLIB=$(${MASON_DIR}/mason prefix zlib system) + #${MASON_DIR}/mason install iconv system + #MASON_ICONV=$(${MASON_DIR}/mason prefix iconv system) + ${MASON_DIR}/mason install gdal ${GDAL_VERSION} + MASON_GDAL=$(${MASON_DIR}/mason prefix gdal ${GDAL_VERSION}) + ln -sf ${MASON_GDAL}/include ${MASON_GDAL}/include/gdal + ${MASON_DIR}/mason install sqlite ${SQLITE_VERSION} + MASON_SQLITE=$(${MASON_DIR}/mason prefix sqlite ${SQLITE_VERSION}) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_SQLITE}/lib/libsqlite3.la + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_GDAL}/lib/libgdal.la + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_PROJ}/lib/libproj.la + perl -i -p -e 
"s/${FIND}/${REPLACE}/g;" ${MASON_XML2}/lib/libxml2.la + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_XML2}/bin/xml2-config + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_GEOS}/lib/libgeos.la + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_GEOS}/lib/libgeos_c.la + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_GEOS}/bin/geos-config + +} + +function mason_compile { + # put protoc-c on path (comes from protobuf_c) + export PATH=${MASON_PROTOBUF_C}/bin:${PATH} + which protoc-c + export LDFLAGS="${LDFLAGS} \ + -L${MASON_GDAL}/lib -lgdal \ + -L${MASON_GEOS}/lib -lgeos_c -lgeos\ + -L${MASON_ZLIB}/lib -lz \ + -L${MASON_TIFF}/lib -ltiff \ + -L${MASON_JPEG}/lib -ljpeg \ + -L${MASON_PROJ}/lib -lsqlite3 \ + -L${MASON_PROJ}/lib -lproj \ + -L${MASON_PNG}/lib -lpng \ + -L${MASON_JSON_C}/lib -ljson-c \ + -L${MASON_PROTOBUF_C}/lib -lprotobuf-c \ + -L${MASON_PROTOBUF}/lib -lprotobuf-lite \ + -L${MASON_EXPAT}/lib -lexpat \ + -L${MASON_XML2}/lib -lxml2" + export CFLAGS="${CFLAGS} -O3 -DNDEBUG -I$(pwd)/liblwgeom/ \ + -I$(pwd)/raster/ -I$(pwd)/raster/rt_core/ \ + -I${MASON_TIFF}/include \ + -I${MASON_JPEG}/include \ + -I${MASON_SQLITE}/include \ + -I${MASON_PROJ}/include \ + -I${MASON_PNG}/include \ + -I${MASON_EXPAT}/include \ + -I${MASON_GDAL}/include \ + -I${MASON_JSON_C}/include \ + -I${MASON_PROTOBUF_C}/include \ + -I${MASON_PROTOBUF}/include \ + -I${MASON_POSTGRES}/include/server \ + -I${MASON_GEOS}/include \ + -I${MASON_XML2}/include/libxml2" + + if [[ $(uname -s) == 'Darwin' ]]; then + export LDFLAGS="${LDFLAGS} -Wl,-lc++ -Wl,${MASON_GDAL}/lib/libgdal.a -Wl,${MASON_POSTGRES}/lib/libpq.a -liconv" + else + export LDFLAGS="${LDFLAGS} ${MASON_GDAL}/lib/libgdal.a -lgeos_c -lgeos -lxml2 -lproj -lexpat -lpng -ljson-c -lprotobuf-c -lprotobuf-lite -ltiff -ljpeg ${MASON_POSTGRES}/lib/libpq.a -pthread -ldl -lz -lstdc++ -lm" + fi + + + MASON_LIBPQ_PATH=${MASON_POSTGRES}/lib/libpq.a + MASON_LIBPQ_PATH2=${MASON_LIBPQ_PATH////\\/} + perl -i -p -e "s/\-lpq/${MASON_LIBPQ_PATH2} 
-pthread/g;" configure + perl -i -p -e "s/librtcore\.a/librtcore\.a \.\.\/\.\.\/liblwgeom\/\.libs\/liblwgeom\.a/g;" raster/loader/Makefile.in + + if [[ $(uname -s) == 'Linux' ]]; then + # help initGEOS configure check + perl -i -p -e "s/\-lgeos_c /\-lgeos_c \-lgeos \-lstdc++ \-lm /g;" configure + # help GDALAllRegister configure check + CMD="data=open('./configure','r').read();open('./configure','w')" + CMD="${CMD}.write(data.replace('\`\$GDAL_CONFIG --libs\`','\"-lgdal -lgeos_c -lgeos -lxml2 -lproj -lexpat -lpng -ljson-c -lprotobuf-c -lprotobuf-lite -ltiff -ljpeg ${MASON_POSTGRES}/lib/libpq.a -pthread -ldl -lz -lstdc++ -lm\"'))" + python -c "${CMD}" + fi + + ./configure \ + --enable-static --disable-shared \ + --prefix=$(mktemp -d) \ + ${MASON_HOST_ARG} \ + --with-projdir=${MASON_PROJ} \ + --with-geosconfig=${MASON_GEOS}/bin/geos-config \ + --with-pgconfig=${MASON_POSTGRES}/bin/pg_config \ + --with-xml2config=${MASON_XML2}/bin/xml2-config \ + --with-gdalconfig=${MASON_GDAL}/bin/gdal-config \ + --with-jsondir=${MASON_JSON_C} \ + --with-protobufdir=${MASON_PROTOBUF_C} \ + --without-gui \ + --with-topology \ + --with-raster \ + --with-sfcgal=no \ + --without-sfcgal \ + --disable-nls || (cat config.log && exit 1) + # -j${MASON_CONCURRENCY} disabled due to https://trac.osgeo.org/postgis/ticket/3345 + make LDFLAGS="$LDFLAGS" CFLAGS="$CFLAGS" + make install LDFLAGS="$LDFLAGS" CFLAGS="$CFLAGS" + # the meat of postgis installs into postgres directory + # so we actually want to package postgres with the postgis stuff + # inside, so here we symlink it + mkdir -p $(dirname $MASON_PREFIX) + ln -sf ${MASON_POSTGRES} ${MASON_PREFIX} +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/postgres/10.3/.travis.yml b/scripts/postgres/10.3/.travis.yml new file mode 100644 index 000000000..8c71516f9 --- /dev/null +++ 
b/scripts/postgres/10.3/.travis.yml @@ -0,0 +1,13 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + compiler: clang + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/postgres/10.3/patch.diff b/scripts/postgres/10.3/patch.diff new file mode 100644 index 000000000..ae2f06a46 --- /dev/null +++ b/scripts/postgres/10.3/patch.diff @@ -0,0 +1,11 @@ +--- src/include/pg_config_manual.h 2013-10-07 20:17:38.000000000 -0700 ++++ src/include/pg_config_manual.h 2014-03-08 21:29:48.000000000 -0800 +@@ -144,7 +144,7 @@ + * here's where to twiddle it. You can also override this at runtime + * with the postmaster's -k switch. + */ +-#define DEFAULT_PGSOCKET_DIR "/tmp" ++#define DEFAULT_PGSOCKET_DIR "/var/run/postgresql" + + /* + * The random() function is expected to yield values between 0 and diff --git a/scripts/postgres/10.3/script.sh b/scripts/postgres/10.3/script.sh new file mode 100755 index 000000000..a4948bc4c --- /dev/null +++ b/scripts/postgres/10.3/script.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +MASON_NAME=postgres +MASON_VERSION=10.3 +MASON_LIB_FILE=bin/psql +MASON_PKGCONFIG_FILE=lib/pkgconfig/libpq.pc + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ + e1590a4b2167dcdf164eb887cf83e7da9e155771 + + mason_extract_tar_bz2 + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/postgresql-${MASON_VERSION} +} + +function mason_prepare_compile { + LIBEDIT_VERSION="3.1" + NCURSES_VERSION="6.1" + CCACHE_VERSION=3.3.1 + ${MASON_DIR}/mason install ccache ${CCACHE_VERSION} + MASON_CCACHE=$(${MASON_DIR}/mason prefix ccache ${CCACHE_VERSION}) + ${MASON_DIR}/mason install libedit ${LIBEDIT_VERSION} + MASON_LIBEDIT=$(${MASON_DIR}/mason prefix libedit ${LIBEDIT_VERSION}) + ${MASON_DIR}/mason install ncurses ${NCURSES_VERSION} + MASON_NCURSES=$(${MASON_DIR}/mason prefix ncurses ${NCURSES_VERSION}) +} + +function mason_compile { + if [[ ${MASON_PLATFORM} == 'linux' ]]; then + mason_step "Loading patch" + patch src/include/pg_config_manual.h ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff + fi + + # note CFLAGS overrides defaults (-Wall -Wmissing-prototypes -Wpointer-arith -Wdeclaration-after-statement -Wendif-labels -Wmissing-format-attribute -Wformat-security -fno-strict-aliasing -fwrapv -Wno-unused-command-line-argument) so we need to add optimization flags back + export CFLAGS="${CFLAGS} -O3 -DNDEBUG -Wall -Wmissing-prototypes -Wpointer-arith -Wdeclaration-after-statement -Wendif-labels -Wmissing-format-attribute -Wformat-security -fno-strict-aliasing -fwrapv -Wno-unused-command-line-argument" + export CFLAGS="-I${MASON_LIBEDIT}/include -I${MASON_NCURSES}/include ${CFLAGS}" + export LDFLAGS="-L${MASON_LIBEDIT}/lib -L${MASON_NCURSES}/lib ${LDFLAGS}" + ./configure \ + --prefix=${MASON_PREFIX} \ + ${MASON_HOST_ARG} \ + --enable-thread-safety \ + --enable-largefile \ + --with-python \ + --with-zlib \ + --without-bonjour \ + --without-openssl \ + --without-pam \ + --without-gssapi \ + --without-ossp-uuid \ + --with-readline \ + --with-libedit-preferred \ + 
--without-ldap \ + --without-libxml \ + --without-libxslt \ + --without-selinux \ + --without-perl \ + --without-tcl \ + --disable-rpath \ + --disable-debug \ + --disable-profiling \ + --disable-coverage \ + --disable-dtrace \ + --disable-depend \ + --disable-cassert + + make -j${MASON_CONCURRENCY} -C src/interfaces/libpq/ install + rm -f src/interfaces/libpq{*.so*,*.dylib} + rm -f ${MASON_PREFIX}/lib/libpq{*.so*,*.dylib} + MASON_LIBPQ_PATH=${MASON_PREFIX}/lib/libpq.a + MASON_LIBPQ_PATH2=${MASON_LIBPQ_PATH////\\/} + MASON_LIBEDIT_PATH=${MASON_LIBEDIT}/lib/libedit.a + MASON_LIBEDIT_PATH=${MASON_LIBEDIT_PATH////\\/} + MASON_NCURSES_PATH=${MASON_NCURSES}/lib/libncurses.a + MASON_NCURSES_PATH=${MASON_NCURSES_PATH////\\/} + perl -i -p -e "s/\-lncurses/${MASON_NCURSES_PATH}/g;" src/backend/Makefile + perl -i -p -e "s/\-lncurses/${MASON_NCURSES_PATH}/g;" src/Makefile.global + perl -i -p -e "s/\-lncurses/${MASON_NCURSES_PATH}/g;" configure + perl -i -p -e "s/\-lncurses/${MASON_NCURSES_PATH}/g;" config/programs.m4 + perl -i -p -e "s/\-ledit/${MASON_LIBEDIT_PATH}/g;" src/Makefile.global.in + perl -i -p -e "s/\-ledit/${MASON_LIBEDIT_PATH}/g;" src/Makefile.global + perl -i -p -e "s/\-lpq/${MASON_LIBPQ_PATH2} -pthread/g;" src/Makefile.global.in + perl -i -p -e "s/\-lpq/${MASON_LIBPQ_PATH2} -pthread/g;" src/Makefile.global + make -j${MASON_CONCURRENCY} install + make -j${MASON_CONCURRENCY} -C contrib install + rm -f ${MASON_PREFIX}/lib/lib{*.so*,*.dylib} +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/postgres/9.5.2/script.sh b/scripts/postgres/9.5.2/script.sh index 0796ca415..45cf98535 100755 --- a/scripts/postgres/9.5.2/script.sh +++ b/scripts/postgres/9.5.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/psql function mason_load_source { mason_download \ - http://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ + https://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ 
9c7bd5c1c601075ff6d5ea7615f9461d5b1f4c88 mason_extract_tar_bz2 diff --git a/scripts/postgres/9.6.1/script.sh b/scripts/postgres/9.6.1/script.sh index 8ca3af308..afe6d3230 100755 --- a/scripts/postgres/9.6.1/script.sh +++ b/scripts/postgres/9.6.1/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/psql function mason_load_source { mason_download \ - http://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ + https://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ 6aef3fb521aaf987a9363a314ff7d5539b6601cd mason_extract_tar_bz2 diff --git a/scripts/postgres/9.6.2-1/script.sh b/scripts/postgres/9.6.2-1/script.sh index c1332e4b6..ff979a917 100755 --- a/scripts/postgres/9.6.2-1/script.sh +++ b/scripts/postgres/9.6.2-1/script.sh @@ -10,7 +10,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libpq.pc function mason_load_source { mason_download \ - http://ftp.postgresql.org/pub/source/v${MASON_VERSION2}/postgresql-${MASON_VERSION2}.tar.bz2 \ + https://ftp.postgresql.org/pub/source/v${MASON_VERSION2}/postgresql-${MASON_VERSION2}.tar.bz2 \ 183f73527051430934a20bf08646b16373cddcca mason_extract_tar_bz2 diff --git a/scripts/postgres/9.6.2/script.sh b/scripts/postgres/9.6.2/script.sh index 32e4a7d70..0c7a61b35 100755 --- a/scripts/postgres/9.6.2/script.sh +++ b/scripts/postgres/9.6.2/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libpq.pc function mason_load_source { mason_download \ - http://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ + https://ftp.postgresql.org/pub/source/v${MASON_VERSION}/postgresql-${MASON_VERSION}.tar.bz2 \ 183f73527051430934a20bf08646b16373cddcca mason_extract_tar_bz2 diff --git a/scripts/postgres/9.6.5/script.sh b/scripts/postgres/9.6.5/script.sh index a8197eb83..8535f75cf 100755 --- a/scripts/postgres/9.6.5/script.sh +++ b/scripts/postgres/9.6.5/script.sh @@ -10,7 +10,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libpq.pc function mason_load_source { 
mason_download \ - http://ftp.postgresql.org/pub/source/v${MASON_VERSION2}/postgresql-${MASON_VERSION2}.tar.bz2 \ + https://ftp.postgresql.org/pub/source/v${MASON_VERSION2}/postgresql-${MASON_VERSION2}.tar.bz2 \ de4007bbb8a5869cc3f193ae34b2fbd9e4b876c4 mason_extract_tar_bz2 @@ -18,6 +18,12 @@ function mason_load_source { export MASON_BUILD_PATH=${MASON_ROOT}/.build/postgresql-${MASON_VERSION2} } +function mason_prepare_compile { + LIBEDIT_VERSION="3.1" + ${MASON_DIR}/mason install libedit ${LIBEDIT_VERSION} + MASON_LIBEDIT=$(${MASON_DIR}/mason prefix libedit ${LIBEDIT_VERSION}) +} + function mason_compile { if [[ ${MASON_PLATFORM} == 'linux' ]]; then mason_step "Loading patch" @@ -38,7 +44,8 @@ function mason_compile { --without-pam \ --without-gssapi \ --without-ossp-uuid \ - --without-readline \ + --with-readline \ + --with-libedit-preferred \ --without-ldap \ --without-libxml \ --without-libxslt \ diff --git a/scripts/proj/4.8.0/script.sh b/scripts/proj/4.8.0/script.sh index 6fe475ab6..462468e15 100755 --- a/scripts/proj/4.8.0/script.sh +++ b/scripts/proj/4.8.0/script.sh @@ -9,7 +9,7 @@ MASON_LIB_FILE=lib/libproj.a function mason_load_source { mason_download \ - http://download.osgeo.org/proj/proj-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/proj/proj-${MASON_VERSION}.tar.gz \ 531953338fd3167670cafb44ca59bd58eaa8712d mason_extract_tar_gz @@ -18,7 +18,7 @@ function mason_load_source { } function mason_compile { - curl --retry 3 -f -# -L http://download.osgeo.org/proj/proj-datumgrid-1.5.zip -o proj-datumgrid-1.5.zip + curl --retry 3 -f -# -L https://download.osgeo.org/proj/proj-datumgrid-1.5.zip -o proj-datumgrid-1.5.zip cd nad unzip -o ../proj-datumgrid-1.5.zip cd ../ diff --git a/scripts/proj/4.9.2/script.sh b/scripts/proj/4.9.2/script.sh index b7c4c9770..f6d26b7a0 100755 --- a/scripts/proj/4.9.2/script.sh +++ b/scripts/proj/4.9.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libproj.a function mason_load_source { mason_download \ - 
http://download.osgeo.org/proj/proj-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/proj/proj-${MASON_VERSION}.tar.gz \ d9d35af05af0d43464c280d14e24bf3743494daf mason_extract_tar_gz @@ -17,7 +17,7 @@ function mason_load_source { } function mason_compile { - curl --retry 3 -f -# -L http://download.osgeo.org/proj/proj-datumgrid-1.5.zip -o proj-datumgrid-1.5.zip + curl --retry 3 -f -# -L https://download.osgeo.org/proj/proj-datumgrid-1.5.zip -o proj-datumgrid-1.5.zip cd nad unzip -o ../proj-datumgrid-1.5.zip cd ../ diff --git a/scripts/proj/4.9.3/script.sh b/scripts/proj/4.9.3/script.sh index 9f4ab742a..255e1c55d 100755 --- a/scripts/proj/4.9.3/script.sh +++ b/scripts/proj/4.9.3/script.sh @@ -9,7 +9,7 @@ GRID_VERSION="1.6" function mason_load_source { mason_download \ - http://download.osgeo.org/proj/proj-${MASON_VERSION}.tar.gz \ + https://download.osgeo.org/proj/proj-${MASON_VERSION}.tar.gz \ e3426c86eb2b834de78bf6535eff60d2ff521120 mason_extract_tar_gz @@ -18,7 +18,7 @@ function mason_load_source { } function mason_compile { - curl --retry 3 -f -# -L http://download.osgeo.org/proj/proj-datumgrid-${GRID_VERSION}.zip -o proj-datumgrid-${GRID_VERSION}.zip + curl --retry 3 -f -# -L https://download.osgeo.org/proj/proj-datumgrid-${GRID_VERSION}.zip -o proj-datumgrid-${GRID_VERSION}.zip cd nad unzip -o ../proj-datumgrid-${GRID_VERSION}.zip cd ../ diff --git a/scripts/proj/6.1.0/.travis.yml b/scripts/proj/6.1.0/.travis.yml new file mode 100644 index 000000000..ca922876b --- /dev/null +++ b/scripts/proj/6.1.0/.travis.yml @@ -0,0 +1,19 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode8.2 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/proj/6.1.0/script.sh b/scripts/proj/6.1.0/script.sh new file mode 100755 index 
000000000..059ee3362 --- /dev/null +++ b/scripts/proj/6.1.0/script.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +MASON_NAME=proj +MASON_VERSION=6.1.0 +MASON_LIB_FILE=lib/libproj.a +GRID_VERSION="1.6" + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://download.osgeo.org/proj/proj-${MASON_VERSION}.tar.gz \ + 6a5162cf81b3e82df02d54fc491a28c65594218d + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + curl --retry 3 -f -# -L https://download.osgeo.org/proj/proj-datumgrid-${GRID_VERSION}.zip -o proj-datumgrid-${GRID_VERSION}.zip + cd data + unzip -o ../proj-datumgrid-${GRID_VERSION}.zip + cd ../ + # note CFLAGS overrides defaults (-g -O2) so we need to add optimization flags back + export CFLAGS="${CFLAGS} -O3 -DNDEBUG" + ./configure --prefix=${MASON_PREFIX} \ + --without-mutex ${MASON_HOST_ARG} \ + --with-jni=no \ + --enable-static \ + --disable-shared \ + --disable-dependency-tracking + + make -j${MASON_CONCURRENCY} + make install +} + +function mason_cflags { + echo -I${MASON_PREFIX}/include +} + +function mason_ldflags { + echo "-lproj" +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/proj/7.2.1/.travis.yml b/scripts/proj/7.2.1/.travis.yml new file mode 100644 index 000000000..70b79e7e3 --- /dev/null +++ b/scripts/proj/7.2.1/.travis.yml @@ -0,0 +1,20 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode9.4 + compiler: clang + - os: linux + sudo: false + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-4.9-dev + - xutils-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/proj/7.2.1/script.sh b/scripts/proj/7.2.1/script.sh new file mode 100755 index 000000000..ada785dd5 --- /dev/null +++ b/scripts/proj/7.2.1/script.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash + +MASON_NAME=proj 
+MASON_VERSION=7.2.1 +MASON_LIB_FILE=lib/libproj.a +PROJ_DATA_VERSION="1.4" +SQLITE_VERSION=3.34.0 +LIBTIFF_VERSION=4.0.7 +JPEG_TURBO_VERSION=1.5.1 + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://download.osgeo.org/proj/proj-${MASON_VERSION}.tar.gz \ + 92b57d23bf86fc985bf33f5bf82b3ada0820cab0 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_prepare_compile { + ${MASON_DIR}/mason install sqlite ${SQLITE_VERSION} + ${MASON_DIR}/mason link sqlite ${SQLITE_VERSION} + MASON_SQLITE=$(${MASON_DIR}/mason prefix sqlite ${SQLITE_VERSION}) + ${MASON_DIR}/mason install libtiff ${LIBTIFF_VERSION} + MASON_LIBTIFF=$(${MASON_DIR}/mason prefix libtiff ${LIBTIFF_VERSION}) + ${MASON_DIR}/mason install jpeg_turbo ${JPEG_TURBO_VERSION} +} + +function mason_compile { + #curl --retry 3 -f -# -L https://download.osgeo.org/proj/proj-data-${PROJ_DATA_VERSION}.tar.gz -o proj-data-${PROJ_DATA_VERSION}.tar.gz + export PATH="${MASON_ROOT}/.link/bin:${PATH}" + export PKG_CONFIG_PATH="${MASON_SQLITE}/lib/pkgconfig:${MASON_LIBTIFF}/lib/pkgconfig" + export CXXFLAGS="${CXXFLAGS} -O3 -DNDEBUG" + ./configure --prefix=${MASON_PREFIX} \ + ${MASON_HOST_ARG} \ + --enable-static \ + --disable-shared \ + --disable-dependency-tracking \ + --without-curl + echo `sqlite3 --version` + make -j${MASON_CONCURRENCY} + make install + #cd ${MASON_PREFIX}/share/proj + #tar xvfz proj-data-${PROJ_DATA_VERSION}.tar.gz +} + +function mason_cflags { + echo -I${MASON_PREFIX}/include +} + +function mason_ldflags { + echo "-lproj" +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/proj/8.0.0/.travis.yml b/scripts/proj/8.0.0/.travis.yml new file mode 100644 index 000000000..067607e24 --- /dev/null +++ b/scripts/proj/8.0.0/.travis.yml @@ -0,0 +1,20 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode12.2 + compiler: clang + - os: linux + sudo: false + addons: 
+ apt: + sources: + - ubuntu-toolchain-r-test + packages: + - libstdc++-6-dev + - xutils-dev + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/proj/8.0.0/script.sh b/scripts/proj/8.0.0/script.sh new file mode 100755 index 000000000..823889794 --- /dev/null +++ b/scripts/proj/8.0.0/script.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash + +MASON_NAME=proj +MASON_VERSION=8.0.0 +MASON_LIB_FILE=lib/libproj.a +PROJ_DATA_VERSION="1.5" +SQLITE_VERSION=3.34.0 +LIBTIFF_VERSION=4.0.8 +JPEG_TURBO_VERSION=1.5.2 + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://download.osgeo.org/proj/proj-${MASON_VERSION}.tar.gz \ + e3b0dcb6c58a92f2fa5a54366089693793847ce0 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_prepare_compile { + ${MASON_DIR}/mason install sqlite ${SQLITE_VERSION} + ${MASON_DIR}/mason link sqlite ${SQLITE_VERSION} + MASON_SQLITE=$(${MASON_DIR}/mason prefix sqlite ${SQLITE_VERSION}) + ${MASON_DIR}/mason install libtiff ${LIBTIFF_VERSION} + MASON_LIBTIFF=$(${MASON_DIR}/mason prefix libtiff ${LIBTIFF_VERSION}) + ${MASON_DIR}/mason install jpeg_turbo ${JPEG_TURBO_VERSION} +} + +function mason_compile { + #curl --retry 3 -f -# -L https://download.osgeo.org/proj/proj-data-${PROJ_DATA_VERSION}.tar.gz -o proj-data-${PROJ_DATA_VERSION}.tar.gz + export PATH="${MASON_ROOT}/.link/bin:${PATH}" + export PKG_CONFIG_PATH="${MASON_SQLITE}/lib/pkgconfig:${MASON_LIBTIFF}/lib/pkgconfig" + export CXXFLAGS="${CXXFLAGS} -O3 -DNDEBUG" + ./configure --prefix=${MASON_PREFIX} \ + ${MASON_HOST_ARG} \ + --enable-static \ + --disable-shared \ + --disable-dependency-tracking \ + --without-curl + echo `sqlite3 --version` + make -j${MASON_CONCURRENCY} + make install + #cd ${MASON_PREFIX}/share/proj + #tar xvfz proj-data-${PROJ_DATA_VERSION}.tar.gz +} + +function mason_cflags { + echo -I${MASON_PREFIX}/include +} + 
+function mason_ldflags { + echo "-lproj" +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/protobuf/3.4.1/script.sh b/scripts/protobuf/3.4.1/script.sh index 3380e4438..c0e006e2a 100755 --- a/scripts/protobuf/3.4.1/script.sh +++ b/scripts/protobuf/3.4.1/script.sh @@ -3,7 +3,7 @@ MASON_NAME=protobuf MASON_VERSION=3.4.1 -if [ ${MASON_PLATFORM} == 'ios' ]; then +if [[ ${MASON_PLATFORM} == 'ios' ]]; then MASON_LIB_FILE=lib-isim-i386/libprotobuf-lite.a MASON_PKGCONFIG_FILE=lib-isim-i386/pkgconfig/protobuf-lite.pc else @@ -28,23 +28,23 @@ function mason_compile { export CFLAGS="${CFLAGS} -O3 -DNDEBUG" export CXXFLAGS="${CXXFLAGS} -O3 -DNDEBUG" - if [ ${MASON_PLATFORM} == 'android' ]; then + if [[ ${MASON_PLATFORM} == 'android' ]]; then export LDFLAGS="${LDFLAGS} -llog" fi - if [ ${MASON_PLATFORM} == 'android' ] || [ ${MASON_PLATFORM} == 'ios' ]; then + if [[ ${MASON_PLATFORM} == 'android' ]] || [[ ${MASON_PLATFORM} == 'ios' ]]; then local PREFIX=$(MASON_PLATFORM= MASON_PLATFORM_VERSION= ${MASON_DIR}/mason prefix ${MASON_NAME} ${MASON_VERSION}) - if [ ! -d ${PREFIX} ]; then + if [[ ! 
-d ${PREFIX} ]]; then $(MASON_PLATFORM= MASON_PLATFORM_VERSION= ${MASON_DIR}/mason install ${MASON_NAME} ${MASON_VERSION}) fi export PROTOBUF_XC_ARG="--with-protoc=${PREFIX}/bin/protoc" fi - if [ ${MASON_PLATFORM} == 'ios' ]; then + if [[ ${MASON_PLATFORM} == 'ios' ]]; then export MACOSX_DEPLOYMENT_TARGET="10.8" fi - if [ -f Makefile ]; then + if [[ -f Makefile ]]; then make distclean fi @@ -65,7 +65,7 @@ function mason_clean { } function mason_config_custom { - if [ ${MASON_PLATFORM} == 'android' ]; then + if [[ ${MASON_PLATFORM} == 'android' ]]; then MASON_CONFIG_LDFLAGS="${MASON_CONFIG_LDFLAGS} -llog" fi } diff --git a/scripts/protobuf_c/1.3.0/script.sh b/scripts/protobuf_c/1.3.0/script.sh index ba87fc30c..f026b73ef 100755 --- a/scripts/protobuf_c/1.3.0/script.sh +++ b/scripts/protobuf_c/1.3.0/script.sh @@ -20,12 +20,25 @@ function mason_load_source { PROTOBUF_VERSION="3.4.1" function mason_prepare_compile { + cd $(dirname ${MASON_ROOT}) ${MASON_DIR}/mason install protobuf ${PROTOBUF_VERSION} MASON_PROTOBUF=$(${MASON_DIR}/mason prefix protobuf ${PROTOBUF_VERSION}) export PKG_CONFIG_PATH=${MASON_PROTOBUF}/lib/pkgconfig:${PKG_CONFIG_PATH:-} + if [[ $(uname -s) == 'Darwin' ]]; then + FIND="\/Users\/travis\/build\/mapbox\/mason" + else + FIND="\/home\/travis\/build\/mapbox\/mason" + fi + REPLACE="$(pwd)" + REPLACE=${REPLACE////\\/} + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_PROTOBUF}/lib/pkgconfig/protobuf.pc + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_PROTOBUF}/lib/pkgconfig/protobuf-lite.pc + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_PROTOBUF}/lib/libprotobuf.la + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_PROTOBUF}/lib/libprotoc.la } function mason_compile { + export PATH=${MASON_PROTOBUF}/bin:${PATH} # note CFLAGS overrides defaults (-O2 -g -DNDEBUG) so we need to add optimization flags back export CFLAGS="${CFLAGS} -O3 -DNDEBUG" export CXXFLAGS="${CXXFLAGS} -O3 -DNDEBUG" diff --git a/scripts/protozero/1.6.2/.travis.yml 
b/scripts/protozero/1.6.2/.travis.yml new file mode 100644 index 000000000..15c3ad555 --- /dev/null +++ b/scripts/protozero/1.6.2/.travis.yml @@ -0,0 +1,10 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/protozero/1.6.2/script.sh b/scripts/protozero/1.6.2/script.sh new file mode 100644 index 000000000..e8c0e0a49 --- /dev/null +++ b/scripts/protozero/1.6.2/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=protozero +MASON_VERSION=1.6.2 +MASON_HEADER_ONLY=true + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/protozero/archive/v${MASON_VERSION}.tar.gz \ + 6c9cc925fc9aee4285d5bd489c2f3978a3d66b84 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/protozero-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/protozero ${MASON_PREFIX}/include/protozero +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/protozero/1.6.3/.travis.yml b/scripts/protozero/1.6.3/.travis.yml new file mode 100644 index 000000000..15c3ad555 --- /dev/null +++ b/scripts/protozero/1.6.3/.travis.yml @@ -0,0 +1,10 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/protozero/1.6.3/script.sh b/scripts/protozero/1.6.3/script.sh new file mode 100644 index 000000000..30cd30d89 --- /dev/null +++ b/scripts/protozero/1.6.3/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=protozero +MASON_VERSION=1.6.3 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/protozero/archive/v${MASON_VERSION}.tar.gz \ + 7f698d0ba99c54d061c456ca75c266a067a64847 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/protozero-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/protozero ${MASON_PREFIX}/include/protozero +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/protozero/1.6.4/.travis.yml b/scripts/protozero/1.6.4/.travis.yml new file mode 100644 index 000000000..15c3ad555 --- /dev/null +++ b/scripts/protozero/1.6.4/.travis.yml @@ -0,0 +1,10 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/protozero/1.6.4/script.sh b/scripts/protozero/1.6.4/script.sh new file mode 100644 index 000000000..0306a3e59 --- /dev/null +++ b/scripts/protozero/1.6.4/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=protozero +MASON_VERSION=1.6.4 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/protozero/archive/v${MASON_VERSION}.tar.gz \ + fb05234cc1095d69b181b090d85537f8a7f39c91 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/protozero-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/protozero ${MASON_PREFIX}/include/protozero +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/protozero/1.6.5/.travis.yml b/scripts/protozero/1.6.5/.travis.yml new file mode 100644 index 000000000..15c3ad555 --- /dev/null +++ b/scripts/protozero/1.6.5/.travis.yml @@ -0,0 +1,10 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/protozero/1.6.5/script.sh b/scripts/protozero/1.6.5/script.sh new file mode 100644 index 000000000..d67ba587c --- /dev/null +++ b/scripts/protozero/1.6.5/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=protozero +MASON_VERSION=1.6.5 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/protozero/archive/v${MASON_VERSION}.tar.gz \ + 98e9f7fb5b59eaffe3133c6241b1b73c3dcede9f + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/protozero-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/protozero ${MASON_PREFIX}/include/protozero +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/protozero/1.6.6/.travis.yml b/scripts/protozero/1.6.6/.travis.yml new file mode 100644 index 000000000..15c3ad555 --- /dev/null +++ b/scripts/protozero/1.6.6/.travis.yml @@ -0,0 +1,10 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/protozero/1.6.6/script.sh b/scripts/protozero/1.6.6/script.sh new file mode 100644 index 000000000..f9f74c538 --- /dev/null +++ b/scripts/protozero/1.6.6/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=protozero +MASON_VERSION=1.6.6 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/protozero/archive/v${MASON_VERSION}.tar.gz \ + 58f5c2058c5e44ad963b03e91b0a77232e665255 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/protozero-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/protozero ${MASON_PREFIX}/include/protozero +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/protozero/1.6.8/.travis.yml b/scripts/protozero/1.6.8/.travis.yml new file mode 100644 index 000000000..15c3ad555 --- /dev/null +++ b/scripts/protozero/1.6.8/.travis.yml @@ -0,0 +1,10 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/protozero/1.6.8/script.sh b/scripts/protozero/1.6.8/script.sh new file mode 100644 index 000000000..aa647ed0b --- /dev/null +++ b/scripts/protozero/1.6.8/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=protozero +MASON_VERSION=1.6.8 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/protozero/archive/v${MASON_VERSION}.tar.gz \ + 426f5fe23557bf430067412d92ba6188b0cb21b2 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/protozero-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/protozero ${MASON_PREFIX}/include/protozero +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/protozero/1.7.0/.travis.yml b/scripts/protozero/1.7.0/.travis.yml new file mode 100644 index 000000000..15c3ad555 --- /dev/null +++ b/scripts/protozero/1.7.0/.travis.yml @@ -0,0 +1,10 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/protozero/1.7.0/script.sh b/scripts/protozero/1.7.0/script.sh new file mode 100644 index 000000000..55a79b5c2 --- /dev/null +++ b/scripts/protozero/1.7.0/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=protozero +MASON_VERSION=1.7.0 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/protozero/archive/v${MASON_VERSION}.tar.gz \ + 42432375e052383c515bdef51abd46289039728c + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/protozero-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/protozero ${MASON_PREFIX}/include/protozero +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/ragel/6.9/script.sh b/scripts/ragel/6.9/script.sh index a3ebcd9b1..445589a29 100755 --- a/scripts/ragel/6.9/script.sh +++ b/scripts/ragel/6.9/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/ragel function mason_load_source { mason_download \ - http://www.colm.net/files/ragel/ragel-${MASON_VERSION}.tar.gz \ + https://www.colm.net/files/ragel/ragel-${MASON_VERSION}.tar.gz \ adf45ba5bb04359e6a0f8d5a98bfc10e6388bf21 mason_extract_tar_gz diff --git a/scripts/re2/2017-08-01/script.sh b/scripts/re2/2017-08-01/script.sh index 553d17682..121e92c25 100755 --- a/scripts/re2/2017-08-01/script.sh +++ b/scripts/re2/2017-08-01/script.sh @@ -25,8 +25,8 @@ function mason_compile { make obj/libre2.a -j${MASON_CONCURRENCY} # re2's install script is janky (hardcoded - as far as I can tell - to /usr/local) and hardcoded - # to also install the shared library (we only what the static one) and simple enough to re-invent - # so install of calling `make install` we instead just manually install the library and headers + # to also install the shared library (we only want the static one) and simple enough to re-invent + # so instead of calling `make install` we just manually install the library and headers mkdir -p ${MASON_PREFIX}/lib/ cp obj/libre2.a ${MASON_PREFIX}/lib/ mkdir -p ${MASON_PREFIX}/include/re2/ diff --git a/scripts/redis/3.0.7/script.sh b/scripts/redis/3.0.7/script.sh index b47963084..d5348283f 100755 --- 
a/scripts/redis/3.0.7/script.sh +++ b/scripts/redis/3.0.7/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/redis-server function mason_load_source { mason_download \ - http://download.redis.io/releases/redis-${MASON_VERSION}.tar.gz \ + https://download.redis.io/releases/redis-${MASON_VERSION}.tar.gz \ e654c84603529c58ef671fa1a50ea84e758316aa mason_extract_tar_gz diff --git a/scripts/redis/3.2.9-configurable-malloc/script.sh b/scripts/redis/3.2.9-configurable-malloc/script.sh index 71cb6e194..93ac9e986 100755 --- a/scripts/redis/3.2.9-configurable-malloc/script.sh +++ b/scripts/redis/3.2.9-configurable-malloc/script.sh @@ -9,7 +9,7 @@ MASON_LIB_FILE=bin/redis-server function mason_load_source { mason_download \ - http://download.redis.io/releases/redis-${RAW_VERSION}.tar.gz \ + https://download.redis.io/releases/redis-${RAW_VERSION}.tar.gz \ 6b2cc5a8223d235d1d2673fa8f806baf1847baa9 mason_extract_tar_gz diff --git a/scripts/redis/3.2.9/script.sh b/scripts/redis/3.2.9/script.sh index 6b0acfea5..7153b82e3 100755 --- a/scripts/redis/3.2.9/script.sh +++ b/scripts/redis/3.2.9/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/redis-server function mason_load_source { mason_download \ - http://download.redis.io/releases/redis-${MASON_VERSION}.tar.gz \ + https://download.redis.io/releases/redis-${MASON_VERSION}.tar.gz \ 6b2cc5a8223d235d1d2673fa8f806baf1847baa9 mason_extract_tar_gz diff --git a/scripts/sdf-glyph-foundry/0.2.0/.travis.yml b/scripts/sdf-glyph-foundry/0.2.0/.travis.yml new file mode 100644 index 000000000..65f6a6af3 --- /dev/null +++ b/scripts/sdf-glyph-foundry/0.2.0/.travis.yml @@ -0,0 +1,7 @@ +language: generic + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/sdf-glyph-foundry/0.2.0/script.sh b/scripts/sdf-glyph-foundry/0.2.0/script.sh new file mode 100644 index 000000000..81d9018c5 --- /dev/null +++ b/scripts/sdf-glyph-foundry/0.2.0/script.sh @@ -0,0 +1,37 @@ 
+#!/usr/bin/env bash + +MASON_NAME=sdf-glyph-foundry +MASON_VERSION=0.2.0 +MASON_HEADER_ONLY=true + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/sdf-glyph-foundry/archive/v${MASON_VERSION}.tar.gz \ + 1ef9a9cef2dc2c020cbf703a48afff43d76f6c99 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/sdf-glyph-foundry-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox + cp -r include/agg ${MASON_PREFIX}/include/agg +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/slang/2.3.1/script.sh b/scripts/slang/2.3.1/script.sh index d9c83b9a6..ef563fec9 100755 --- a/scripts/slang/2.3.1/script.sh +++ b/scripts/slang/2.3.1/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/libslang.a function mason_load_source { mason_download \ - http://www.jedsoft.org/releases/slang/slang-${MASON_VERSION}.tar.bz2 \ + https://www.jedsoft.org/releases/slang/slang-${MASON_VERSION}.tar.bz2 \ 8617d4745d1be3e086adb2fb8ca349a64711afc7 mason_extract_tar_bz2 diff --git a/scripts/sqlite/3.24.0-min-size/.travis.yml b/scripts/sqlite/3.24.0-min-size/.travis.yml new file mode 100644 index 000000000..b783ff56b --- /dev/null +++ b/scripts/sqlite/3.24.0-min-size/.travis.yml @@ -0,0 +1,34 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode9.4 + - os: linux + sudo: false + compiler: clang + env: MASON_PLATFORM_VERSION=cortex_a9 + - os: linux + sudo: false + compiler: clang + env: MASON_PLATFORM_VERSION=i686 + - os: linux + sudo: false + compiler: clang + env: MASON_PLATFORM_VERSION=x86_64 + - os: linux + sudo: false + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v7 + - os: linux + sudo: false + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v8 + - os: linux + sudo: false + env: MASON_PLATFORM=android 
MASON_ANDROID_ABI=x86 + - os: linux + sudo: false + env: MASON_PLATFORM=android MASON_ANDROID_ABI=x86-64 + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/sqlite/3.24.0-min-size/script.sh b/scripts/sqlite/3.24.0-min-size/script.sh new file mode 100755 index 000000000..c17a0fb3d --- /dev/null +++ b/scripts/sqlite/3.24.0-min-size/script.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +MASON_NAME=sqlite +MASON_VERSION=3.24.0-min-size +MASON_LIB_FILE=lib/libsqlite3.a +MASON_PKGCONFIG_FILE=lib/pkgconfig/sqlite3.pc + +SQLITE_FILE_VERSION=3240000 + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://www.sqlite.org/2018/sqlite-autoconf-${SQLITE_FILE_VERSION}.tar.gz \ + ad4be6eaaa45b26edb54d95d4de9debbd3704c9e + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/sqlite-autoconf-${SQLITE_FILE_VERSION} +} + +function mason_compile { + # Note: setting CFLAGS overrides the default in sqlite of `-g -O2` + # hence we add back the preferred optimization + CFLAGS="${CFLAGS} -Os -DNDEBUG" ./configure \ + --prefix=${MASON_PREFIX} \ + ${MASON_HOST_ARG} \ + --enable-static \ + --with-pic \ + --disable-shared \ + --disable-dependency-tracking || cat ${MASON_BUILD_PATH}/config.log + + make install -j${MASON_CONCURRENCY} +} + +function mason_strip_ldflags { + shift # -L... 
+ shift # -lsqlite3 + echo "$@" +} + +function mason_ldflags { + mason_strip_ldflags $(`mason_pkgconfig` --static --libs) +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/sqlite/3.34.0/.travis.yml b/scripts/sqlite/3.34.0/.travis.yml new file mode 100644 index 000000000..42a376fcc --- /dev/null +++ b/scripts/sqlite/3.34.0/.travis.yml @@ -0,0 +1,39 @@ +language: generic + +matrix: + include: + - os: osx + osx_image: xcode9.4 + - os: linux + sudo: false + env: MASON_PLATFORM_VERSION=cortex_a9 + - os: linux + sudo: false + env: MASON_PLATFORM_VERSION=i686 + - os: linux + sudo: false + - os: linux + sudo: false + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v5 + - os: linux + sudo: false + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v7 + - os: linux + sudo: false + env: MASON_PLATFORM=android MASON_ANDROID_ABI=arm-v8 + - os: linux + sudo: false + env: MASON_PLATFORM=android MASON_ANDROID_ABI=x86 + - os: linux + sudo: false + env: MASON_PLATFORM=android MASON_ANDROID_ABI=x86-64 + - os: linux + sudo: false + env: MASON_PLATFORM=android MASON_ANDROID_ABI=mips + - os: linux + sudo: false + env: MASON_PLATFORM=android MASON_ANDROID_ABI=mips-64 + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/sqlite/3.34.0/script.sh b/scripts/sqlite/3.34.0/script.sh new file mode 100755 index 000000000..cac65bbab --- /dev/null +++ b/scripts/sqlite/3.34.0/script.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +MASON_NAME=sqlite +MASON_VERSION=3.34.0 +MASON_LIB_FILE=lib/libsqlite3.a +MASON_PKGCONFIG_FILE=lib/pkgconfig/sqlite3.pc + +SQLITE_FILE_VERSION=3340000 + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://www.sqlite.org/2020/sqlite-autoconf-${SQLITE_FILE_VERSION}.tar.gz \ + 4ddcb8924ca93836dc5d590929e715083027212a + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/sqlite-autoconf-${SQLITE_FILE_VERSION} +} + +function mason_compile { + # Note: setting CFLAGS overrides the default in sqlite of `-g -O2` + # hence we add back the preferred optimization + CFLAGS="-O3 ${CFLAGS} -DNDEBUG" ./configure \ + --prefix=${MASON_PREFIX} \ + ${MASON_HOST_ARG} \ + --enable-static \ + --with-pic \ + --disable-shared \ + --disable-readline \ + --disable-dependency-tracking + + make install -j${MASON_CONCURRENCY} +} + +function mason_strip_ldflags { + shift # -L... + shift # -lsqlite3 + echo "$@" +} + +function mason_ldflags { + mason_strip_ldflags $(`mason_pkgconfig` --static --libs) +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/sqlite/3.8.8.1/script.sh b/scripts/sqlite/3.8.8.1/script.sh index ad698d0d8..a78991e01 100755 --- a/scripts/sqlite/3.8.8.1/script.sh +++ b/scripts/sqlite/3.8.8.1/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/sqlite3.pc function mason_load_source { mason_download \ - http://www.sqlite.org/2015/sqlite-autoconf-3080801.tar.gz \ + https://www.sqlite.org/2015/sqlite-autoconf-3080801.tar.gz \ 24012945241c0b55774b8bad2679912e14703a24 mason_extract_tar_gz diff --git a/scripts/sqlite/3.8.8.3/script.sh b/scripts/sqlite/3.8.8.3/script.sh index 9bc34df24..a18b03fbc 100755 --- a/scripts/sqlite/3.8.8.3/script.sh +++ b/scripts/sqlite/3.8.8.3/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/sqlite3.pc function mason_load_source { mason_download \ - http://sqlite.org/2015/sqlite-autoconf-3080803.tar.gz \ + https://sqlite.org/2015/sqlite-autoconf-3080803.tar.gz \ 55d0c095e5bf76ed7b450265261b367228bbd0ba mason_extract_tar_gz diff --git a/scripts/supercluster/0.3.0/.travis.yml 
b/scripts/supercluster/0.3.0/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/supercluster/0.3.0/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/supercluster/0.3.0/script.sh b/scripts/supercluster/0.3.0/script.sh new file mode 100644 index 000000000..18360cad0 --- /dev/null +++ b/scripts/supercluster/0.3.0/script.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +MASON_NAME=supercluster +MASON_VERSION=0.3.0 +MASON_HEADER_ONLY=true + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/supercluster.hpp/archive/v${MASON_VERSION}.tar.gz \ + eb2d74052b0c5a482edcbeb9d7a9b8cc5f4cd6af + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/supercluster.hpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -v include/*.hpp ${MASON_PREFIX}/include + cp -v README.md LICENSE ${MASON_PREFIX} +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/supercluster/0.3.2/.travis.yml b/scripts/supercluster/0.3.2/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/supercluster/0.3.2/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/supercluster/0.3.2/script.sh b/scripts/supercluster/0.3.2/script.sh new file mode 100644 index 000000000..92a7021a0 --- /dev/null +++ b/scripts/supercluster/0.3.2/script.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +MASON_NAME=supercluster +MASON_VERSION=0.3.2 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/supercluster.hpp/archive/v${MASON_VERSION}.tar.gz \ + e7d8d6bbf427ec3d19eb5134c1233f38cae897da + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/supercluster.hpp-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -v include/*.hpp ${MASON_PREFIX}/include + cp -v README.md LICENSE ${MASON_PREFIX} +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/swiftshader/2018-05-31/.travis.yml b/scripts/swiftshader/2018-05-31/.travis.yml new file mode 100644 index 000000000..ce2e5f43a --- /dev/null +++ b/scripts/swiftshader/2018-05-31/.travis.yml @@ -0,0 +1,30 @@ +language: cpp + +sudo: false + +compiler: clang + +addons: + apt: + update: true + sources: [ 'george-edison55-precise-backports' ] + packages: [ 'cmake', 'cmake-data' ] + +matrix: + exclude: + - os: linux + include: + - os: osx + osx_image: xcode9.3 + env: MASON_PLATFORM=osx + - os: linux + dist: trusty + env: MASON_PLATFORM=linux + +install: + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/swiftshader/2018-05-31/script.sh b/scripts/swiftshader/2018-05-31/script.sh new file mode 100644 index 000000000..a6fedd86d --- /dev/null +++ b/scripts/swiftshader/2018-05-31/script.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +MASON_NAME=swiftshader +MASON_VERSION=2018-05-31 +MASON_LIB_FILE=lib/libGLESv2.${MASON_DYNLIB_SUFFIX} + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + export MASON_BUILD_PATH=${MASON_ROOT}/.build/swiftshader-${MASON_VERSION} + if [ ! 
-d "${MASON_BUILD_PATH}" ]; then + git clone --branch release-${MASON_VERSION} https://github.com/mapbox/swiftshader.git "${MASON_BUILD_PATH}" + fi + git -C "${MASON_BUILD_PATH}" clean -fdxebuild + git -C "${MASON_BUILD_PATH}" checkout release-${MASON_VERSION} + git -C "${MASON_BUILD_PATH}" submodule update --init +} + +function mason_compile { + cmake -H. -Bbuild \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX="${MASON_PREFIX}" \ + -DBUILD_GLES_CM=NO \ + -DBUILD_SAMPLES=NO \ + -DREACTOR_BACKEND=LLVM + make -C build -j${MASON_CONCURRENCY} libEGL libGLESv2 + + rm -rf "${MASON_PREFIX}" + mkdir -p "${MASON_PREFIX}/lib" + cp -av build/lib{EGL,GLESv2}.*${MASON_DYNLIB_SUFFIX}* "${MASON_PREFIX}/lib/" + rsync -av "include" "${MASON_PREFIX}" --exclude Direct3D --exclude GL --exclude GLES +} + +function mason_cflags { + echo "-isystem ${MASON_PREFIX}/include" +} + +function mason_ldflags { + echo "-L${MASON_PREFIX}/lib -lEGL -lGLESv2" +} + +function mason_static_libs { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/swiftshader/2018-06-29-539468c/.travis.yml b/scripts/swiftshader/2018-06-29-539468c/.travis.yml new file mode 100644 index 000000000..ce2e5f43a --- /dev/null +++ b/scripts/swiftshader/2018-06-29-539468c/.travis.yml @@ -0,0 +1,30 @@ +language: cpp + +sudo: false + +compiler: clang + +addons: + apt: + update: true + sources: [ 'george-edison55-precise-backports' ] + packages: [ 'cmake', 'cmake-data' ] + +matrix: + exclude: + - os: linux + include: + - os: osx + osx_image: xcode9.3 + env: MASON_PLATFORM=osx + - os: linux + dist: trusty + env: MASON_PLATFORM=linux + +install: + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/swiftshader/2018-06-29-539468c/script.sh b/scripts/swiftshader/2018-06-29-539468c/script.sh new file mode 100644 index 000000000..9339f5b72 --- /dev/null +++ 
b/scripts/swiftshader/2018-06-29-539468c/script.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash + +MASON_NAME=swiftshader +MASON_VERSION=2018-06-29-539468c +GITSHA=539468cef869524e7b4a387b41b4bcb4236a1aff +MASON_LIB_FILE=lib/libGLESv2.${MASON_DYNLIB_SUFFIX} + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/google/swiftshader/archive/${GITSHA}.tar.gz \ + 6850161428f42313eeb5922322325a4535b7e077 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${GITSHA} + + cd ${MASON_BUILD_PATH} + + patch <131, and if + # that fails, try >= 1.7.3 +-PKG_CHECK_MODULES([DEPS], [protobuf >= 2.4.0 libcurl >= 7.35.0 zlib >= 1.2.8 liblz4 >= 131],, [ +- PKG_CHECK_MODULES([DEPS], [protobuf >= 2.4.0 libcurl >= 7.35.0 zlib >= 1.2.8 liblz4 >= 1.7.3]) ++PKG_CHECK_MODULES([DEPS], [protobuf >= 2.4.0 libcurl >= 7.35.0 zlib >= 1.2.3 liblz4 >= 131],, [ ++ PKG_CHECK_MODULES([DEPS], [protobuf >= 2.4.0 libcurl >= 7.35.0 zlib >= 1.2.3 liblz4 >= 1.7.3]) + ]) + + # if we wanted services +diff --git a/src/baldr/graphreader.cc b/src/baldr/graphreader.cc +index 520469c0..56b0408f 100644 +--- a/src/baldr/graphreader.cc ++++ b/src/baldr/graphreader.cc +@@ -247,22 +247,7 @@ const GraphTile* GraphReader::GetGraphTile(const GraphId& graphid) { + return inserted; + }// Try getting it from flat file + else { +- // This reads the tile from disk +- GraphTile tile(tile_dir_, base); +- if (!tile.header()) { +- if(tile_url_.empty() || _404s.find(base) != _404s.end()) +- return nullptr; +- tile = GraphTile(tile_url_, base, curler); +- if(!tile.header()) { +- _404s.insert(base); +- return nullptr; +- } +- } +- +- // Keep a copy in the cache and return it +- size_t size = tile.header()->end_offset(); +- auto inserted = cache_->Put(base, tile, size); +- return inserted; ++ return nullptr; + } + } + +diff --git a/src/baldr/graphtile.cc b/src/baldr/graphtile.cc +index 94b3af7e..8a153a26 100644 +--- a/src/baldr/graphtile.cc ++++ 
b/src/baldr/graphtile.cc +@@ -123,24 +123,6 @@ GraphTile::GraphTile(const GraphId& graphid, char* ptr, size_t size) + Initialize(graphid, ptr, size); + } + +-GraphTile::GraphTile(const std::string& tile_url, const GraphId& graphid, curler_t& curler) { +- // Don't bother with invalid ids +- if (!graphid.Is_Valid() || graphid.level() > TileHierarchy::get_max_level()) +- return; +- +- // Get the response returned from curl +- std::string uri = tile_url + filesystem::path_separator + FileSuffix(graphid.Tile_Base()); +- long http_code; +- auto tile_data = curler(uri, http_code); +- +- // If its good try to use it +- if(http_code == 200) { +- graphtile_ = std::make_shared >(std::move(tile_data)); +- Initialize(graphid, &(*graphtile_)[0], graphtile_->size()); +- //TODO: optionally write the tile to disk? +- } +-} +- + GraphTile::~GraphTile() { + } + +diff --git a/src/thor/matrix_action.cc b/src/thor/matrix_action.cc +index a6c79c03..ff142240 100644 +--- a/src/thor/matrix_action.cc ++++ b/src/thor/matrix_action.cc +@@ -1,3 +1,4 @@ ++#include "midgard/logging.h" + #include "thor/worker.h" + #include "sif/autocost.h" + #include "sif/bicyclecost.h" +diff --git a/src/thor/trace_route_action.cc b/src/thor/trace_route_action.cc +index 346c85d8..2ee0e99f 100644 +--- a/src/thor/trace_route_action.cc ++++ b/src/thor/trace_route_action.cc +@@ -8,6 +8,7 @@ + + #include "exception.h" + #include "meili/map_matcher.h" ++#include + + #include "thor/route_matcher.h" + #include "thor/map_matcher.h" +diff --git a/src/tyr/locate_serializer.cc b/src/tyr/locate_serializer.cc +index 1760a9ea..84a7f833 100644 +--- a/src/tyr/locate_serializer.cc ++++ b/src/tyr/locate_serializer.cc +@@ -1,6 +1,7 @@ + #include + #include "baldr/json.h" + #include "tyr/serializers.h" ++#include "midgard/logging.h" + + using namespace valhalla; + using namespace valhalla::baldr; +diff --git a/valhalla/baldr/graphreader.h b/valhalla/baldr/graphreader.h +index dfe1b1fe..d9fe0935 100644 +--- 
a/valhalla/baldr/graphreader.h ++++ b/valhalla/baldr/graphreader.h +@@ -6,7 +6,6 @@ + #include + #include + +-#include + #include + #include + #include +@@ -562,7 +561,6 @@ class GraphReader { + static std::shared_ptr get_extract_instance(const boost::property_tree::ptree& pt); + + // Stuff for getting at remote tiles +- curler_t curler; + std::string tile_url_; + std::unordered_set _404s; + // Information about where the tiles are kept +diff --git a/valhalla/baldr/graphtile.h b/valhalla/baldr/graphtile.h +index 7e80e549..03c16378 100644 +--- a/valhalla/baldr/graphtile.h ++++ b/valhalla/baldr/graphtile.h +@@ -19,7 +19,6 @@ + #include + #include + #include +-#include + + #include + #include +@@ -60,14 +59,6 @@ class GraphTile { + */ + GraphTile(const GraphId& graphid, char* ptr, size_t size); + +- /** +- * Constructor given the graph Id, in memory tile data +- * @param tile_url URL of tile +- * @param graphid Tile Id +- * @param curler curler that will handle tile downloading +- */ +- GraphTile(const std::string& tile_url, const GraphId& graphid, curler_t& curler); +- + /** + * Destructor + */ diff --git a/scripts/valhalla/2.4.9/script.sh b/scripts/valhalla/2.4.9/script.sh new file mode 100755 index 000000000..8d1ccffe8 --- /dev/null +++ b/scripts/valhalla/2.4.9/script.sh @@ -0,0 +1,156 @@ +#!/usr/bin/env bash + +MASON_NAME=valhalla +MASON_VERSION=2.4.9 +MASON_LIB_FILE=lib/libvalhalla.a +#MASON_PKGCONFIG_FILE=lib/pkgconfig/libvalhalla.pc + + +. ${MASON_DIR}/mason.sh + +function mason_load_source { +# mason_download \ +# https://github.com/valhalla/${MASON_NAME}/archive/${MASON_VERSION}.tar.gz \ +# 12718a7f8d26f707469895fb2a7e69f748356f7d +# +# mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} + + if [ ! 
-d "$MASON_BUILD_PATH" ] ; then + mkdir -p "$MASON_BUILD_PATH" + git clone --branch "$MASON_VERSION" --recursive https://github.com/valhalla/valhalla.git "$MASON_BUILD_PATH" + fi +} + +function mason_prepare_compile { + # Install the zlib dependency when cross compiling as usually the host system only + # provides the zlib headers and libraries in the path for the host architecture. + if [ ${MASON_PLATFORM_VERSION} == "cortex_a9" ] || [ ${MASON_PLATFORM_VERSION} == "i686" ]; then + cd $(dirname ${MASON_ROOT}) + ${MASON_DIR}/mason install zlib_shared ${ZLIB_SHARED_VERSION} + ${MASON_DIR}/mason link zlib_shared ${ZLIB_SHARED_VERSION} + + MASON_ZLIB_CFLAGS="$(${MASON_DIR}/mason cflags zlib_shared ${ZLIB_SHARED_VERSION})" + MASON_ZLIB_LDFLAGS="-L$(${MASON_DIR}/mason prefix zlib_shared ${ZLIB_SHARED_VERSION})/lib" + fi + + ${MASON_DIR}/mason install lz4 1.8.2 + ${MASON_DIR}/mason link lz4 1.8.2 + + ${MASON_DIR}/mason install protobuf 3.5.1 + ${MASON_DIR}/mason link protobuf 3.5.1 + + ${MASON_DIR}/mason install boost 1.66.0 + ${MASON_DIR}/mason link boost 1.66.0 + ${MASON_DIR}/mason install boost_libprogram_options 1.66.0 + ${MASON_DIR}/mason link boost_libprogram_options 1.66.0 + ${MASON_DIR}/mason install boost_libsystem 1.66.0 + ${MASON_DIR}/mason link boost_libsystem 1.66.0 + ${MASON_DIR}/mason install boost_libthread 1.66.0 + ${MASON_DIR}/mason link boost_libthread 1.66.0 + ${MASON_DIR}/mason install boost_libfilesystem 1.66.0 + ${MASON_DIR}/mason link boost_libfilesystem 1.66.0 + ${MASON_DIR}/mason install boost_libregex_icu57 1.66.0 + ${MASON_DIR}/mason link boost_libregex_icu57 1.66.0 + ${MASON_DIR}/mason install boost_libregex 1.66.0 + ${MASON_DIR}/mason link boost_libregex 1.66.0 + ${MASON_DIR}/mason install boost_libdate_time 1.66.0 + ${MASON_DIR}/mason link boost_libdate_time 1.66.0 + ${MASON_DIR}/mason install boost_libiostreams 1.66.0 + ${MASON_DIR}/mason link boost_libiostreams 1.66.0 + + ${MASON_DIR}/mason install lua 5.3.0 + ${MASON_DIR}/mason link lua 
5.3.0 + + ${MASON_DIR}/mason install sqlite 3.21.0 + ${MASON_DIR}/mason link sqlite 3.21.0 + + if [ ${MASON_PLATFORM} = 'osx' ]; then + ${MASON_DIR}/mason install libcurl system + ${MASON_DIR}/mason link libcurl system + else + ${MASON_DIR}/mason install libcurl 7.50.2 + ${MASON_DIR}/mason link libcurl 7.50.2 + fi + + # set up to fix libtool .la files + # https://github.com/mapbox/mason/issues/61 + if [[ $(uname -s) == 'Darwin' ]]; then + FIND="\/Users\/travis\/build\/mapbox\/mason" + else + FIND="\/home\/travis\/build\/mapbox\/mason" + fi + REPLACE="$(pwd)" + REPLACE=${REPLACE////\\/} + + ${MASON_DIR}/mason install geos 3.6.2 + ${MASON_DIR}/mason link geos 3.6.2 + MASON_GEOS=$(${MASON_DIR}/mason prefix geos 3.6.2) + perl -i -p -e "s/${FIND}/${REPLACE}/g;" ${MASON_GEOS}/bin/geos-config +} + +function mason_compile { + export CXXFLAGS="-isystem ${MASON_ROOT}/.link/include ${CXXFLAGS:-} -D_GLIBCXX_USE_CXX11_ABI=0" + export CFLAGS="-isystem ${MASON_ROOT}/.link/include ${CFLAGS:-}" + export LDFLAGS="-L${MASON_ROOT}/.link/lib ${LDFLAGS:-}" + export PATH="${MASON_ROOT}/.link/bin:${PATH}" + export LUA="${MASON_ROOT}/.link/bin/lua" + export LUA_INCLUDE="-isystem ${MASON_ROOT}/.link/include" + export LUA_LIB="-llua" + +#declare -x MASON_BUILD_PATH="/Users/danpat/mapbox/mason/mason_packages/.build/valhalla-2.4.9" +#declare -x MASON_DIR="/Users/danpat/mapbox/mason" +#declare -x MASON_DYNLIB_SUFFIX="dylib" +#declare -x MASON_HOST_ARG="--host=x86_64-apple-darwin" +#declare -x MASON_PLATFORM_VERSION="x86_64" +#declare -x MASON_ROOT="/Users/danpat/mapbox/mason/mason_packages" + +export + + patch -N -p1 < ${MASON_DIR}/scripts/${MASON_NAME}/${MASON_VERSION}/patch.diff + + NOCONFIGURE=1 ./autogen.sh + + + PKG_CONFIG_PATH="${MASON_ROOT}/.link/lib/pkgconfig" \ + ./configure \ + --prefix="$MASON_PREFIX" \ + ${MASON_HOST_ARG} \ + --enable-static \ + --with-pic \ + --disable-shared \ + --disable-services \ + --disable-data_tools \ + --disable-dependency-tracking \ + 
--disable-python-bindings \ + --with-pkgconfigdir="${MASON_ROOT}/.link/lib/pkgconfig" \ + --with-protoc="$MASON_ROOT/.link/bin/protoc" \ + --with-protobuf-includes="$MASON_ROOT/.link/include" \ + --with-protobuf-libdir="$MASON_ROOT/.link/lib" \ + --with-boost=$(${MASON_DIR}/mason prefix boost 1.66.0) \ + --with-boost-libdir="${MASON_ROOT}/.link/lib" \ + --with-boost-python=no \ + --with-sqlite3=$(${MASON_DIR}/mason prefix sqlite 3.21.0) \ + --with-geos=${MASON_GEOS}/bin/geos-config + + V=1 VERBOSE=1 make install -j${MASON_CONCURRENCY} +} + +function mason_strip_ldflags { + : +} + +function mason_cflags { + : +} + +function mason_ldflags { + : +} + +function mason_clean { + make clean +} + +mason_run "$@" diff --git a/scripts/variant/1.1.6/.travis.yml b/scripts/variant/1.1.6/.travis.yml new file mode 100644 index 000000000..e7e77e2b0 --- /dev/null +++ b/scripts/variant/1.1.6/.travis.yml @@ -0,0 +1,6 @@ +language: cpp +sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/variant/1.1.6/script.sh b/scripts/variant/1.1.6/script.sh new file mode 100755 index 000000000..b6f17c919 --- /dev/null +++ b/scripts/variant/1.1.6/script.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +MASON_NAME=variant +MASON_VERSION=1.1.6 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/variant/archive/v${MASON_VERSION}.tar.gz \ + 8b0b2d7902910b09812e68b92318fad5c4ffb12b + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/variant-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox + cp -v README.md LICENSE ${MASON_PREFIX} +} + +function mason_cflags { + echo -isystem ${MASON_PREFIX}/include -I${MASON_PREFIX}/include +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/variant/1.2.0/.travis.yml b/scripts/variant/1.2.0/.travis.yml new file mode 100644 index 000000000..e7e77e2b0 --- /dev/null +++ b/scripts/variant/1.2.0/.travis.yml @@ -0,0 +1,6 @@ +language: cpp +sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/variant/1.2.0/script.sh b/scripts/variant/1.2.0/script.sh new file mode 100755 index 000000000..80f495033 --- /dev/null +++ b/scripts/variant/1.2.0/script.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +MASON_NAME=variant +MASON_VERSION=1.2.0 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/variant/archive/v${MASON_VERSION}.tar.gz \ + 8a4085f9a7a65335625834a6ad837426f0233520 + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/variant-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox + cp -v README.md LICENSE ${MASON_PREFIX} +} + +function mason_cflags { + echo -isystem ${MASON_PREFIX}/include -I${MASON_PREFIX}/include +} + +function mason_ldflags { + : +} + +function mason_static_libs { + : +} + +mason_run "$@" diff --git a/scripts/vector-tile/1.0.2/.travis.yml b/scripts/vector-tile/1.0.2/.travis.yml new file mode 100644 index 000000000..33860b45a --- /dev/null +++ b/scripts/vector-tile/1.0.2/.travis.yml @@ -0,0 +1,10 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/vector-tile/1.0.2/script.sh b/scripts/vector-tile/1.0.2/script.sh new file mode 100644 index 000000000..0c1d096af --- /dev/null +++ b/scripts/vector-tile/1.0.2/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=vector-tile +MASON_VERSION=1.0.2 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + 3a88cf47e703a8d4ba88285a72a18911f62a7476 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/vector-tile/1.0.3/.travis.yml b/scripts/vector-tile/1.0.3/.travis.yml new file mode 100644 index 000000000..33860b45a --- /dev/null +++ b/scripts/vector-tile/1.0.3/.travis.yml @@ -0,0 +1,10 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/vector-tile/1.0.3/script.sh b/scripts/vector-tile/1.0.3/script.sh new file mode 100644 index 000000000..0c9f85d6a --- /dev/null +++ b/scripts/vector-tile/1.0.3/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=vector-tile +MASON_VERSION=1.0.3 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/${MASON_NAME}/archive/v${MASON_VERSION}.tar.gz \ + 7c1ddb535c2754e44a3310b82084249b56be20eb + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/vector-tile/36d5eb0/.travis.yml b/scripts/vector-tile/36d5eb0/.travis.yml new file mode 100644 index 000000000..15c3ad555 --- /dev/null +++ b/scripts/vector-tile/36d5eb0/.travis.yml @@ -0,0 +1,10 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/vector-tile/36d5eb0/script.sh b/scripts/vector-tile/36d5eb0/script.sh new file mode 100644 index 000000000..a31f38f48 --- /dev/null +++ b/scripts/vector-tile/36d5eb0/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=vector-tile +MASON_VERSION=36d5eb0 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/${MASON_NAME}/tarball/${MASON_VERSION} \ + 4385496adad787a8b37792f4e6719b8b6c71cf21 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapbox-${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/vector-tile/44975a1/.travis.yml b/scripts/vector-tile/44975a1/.travis.yml new file mode 100644 index 000000000..33860b45a --- /dev/null +++ b/scripts/vector-tile/44975a1/.travis.yml @@ -0,0 +1,10 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/vector-tile/44975a1/script.sh b/scripts/vector-tile/44975a1/script.sh new file mode 100644 index 000000000..a4363b054 --- /dev/null +++ b/scripts/vector-tile/44975a1/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=vector-tile +MASON_VERSION=44975a1 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/${MASON_NAME}/tarball/${MASON_VERSION} \ + 8860f28b45fc337cccc8d60bd182c0168874e240 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapbox-${MASON_NAME}-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + +mason_run "$@" diff --git a/scripts/vtzero/1.0.1/.travis.yml b/scripts/vtzero/1.0.1/.travis.yml new file mode 100644 index 000000000..7635baa54 --- /dev/null +++ b/scripts/vtzero/1.0.1/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/vtzero/1.0.1/script.sh b/scripts/vtzero/1.0.1/script.sh new file mode 100644 index 000000000..030da7d84 --- /dev/null +++ b/scripts/vtzero/1.0.1/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=vtzero +MASON_VERSION=1.0.1 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/vtzero/archive/v${MASON_VERSION}.tar.gz \ + 5492c5177d413f362acf1a166bad43264f21d6f8 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/vtzero-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/vtzero ${MASON_PREFIX}/include/vtzero +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/vtzero/1.0.2/.travis.yml b/scripts/vtzero/1.0.2/.travis.yml new file mode 100644 index 000000000..7635baa54 --- /dev/null +++ b/scripts/vtzero/1.0.2/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/vtzero/1.0.2/script.sh b/scripts/vtzero/1.0.2/script.sh new file mode 100644 index 000000000..fc0be9374 --- /dev/null +++ b/scripts/vtzero/1.0.2/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=vtzero +MASON_VERSION=1.0.2 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/vtzero/archive/v${MASON_VERSION}.tar.gz \ + 2bf529761a418c70ae06ab5f7439d6462890ad4b + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/vtzero-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/vtzero ${MASON_PREFIX}/include/vtzero +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/vtzero/1.0.3/.travis.yml b/scripts/vtzero/1.0.3/.travis.yml new file mode 100644 index 000000000..7635baa54 --- /dev/null +++ b/scripts/vtzero/1.0.3/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/vtzero/1.0.3/script.sh b/scripts/vtzero/1.0.3/script.sh new file mode 100644 index 000000000..4f2d76a88 --- /dev/null +++ b/scripts/vtzero/1.0.3/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=vtzero +MASON_VERSION=1.0.3 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/vtzero/archive/v${MASON_VERSION}.tar.gz \ + 047d98e07f6c0b8607e90489156b28a9355edb2c + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/vtzero-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/vtzero ${MASON_PREFIX}/include/vtzero +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/vtzero/1.1.0/.travis.yml b/scripts/vtzero/1.1.0/.travis.yml new file mode 100644 index 000000000..7635baa54 --- /dev/null +++ b/scripts/vtzero/1.1.0/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/vtzero/1.1.0/script.sh b/scripts/vtzero/1.1.0/script.sh new file mode 100644 index 000000000..7efabca98 --- /dev/null +++ b/scripts/vtzero/1.1.0/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=vtzero +MASON_VERSION=1.1.0 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/vtzero/archive/v${MASON_VERSION}.tar.gz \ + 6782a64655eecd0cca40845f0efc0709f069ae75 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/vtzero-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/vtzero ${MASON_PREFIX}/include/vtzero +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/vtzero/1b89523/.travis.yml b/scripts/vtzero/1b89523/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/vtzero/1b89523/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/vtzero/1b89523/script.sh b/scripts/vtzero/1b89523/script.sh new file mode 100644 index 000000000..9ac42c078 --- /dev/null +++ b/scripts/vtzero/1b89523/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=vtzero +MASON_VERSION=1b89523 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/vtzero/tarball/${MASON_VERSION} \ + ec411459f66a6ae7e0af739850912378a6fce3ce + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapbox-vtzero-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/vtzero ${MASON_PREFIX}/include/vtzero +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/vtzero/2915725/.travis.yml b/scripts/vtzero/2915725/.travis.yml new file mode 100644 index 000000000..7635baa54 --- /dev/null +++ b/scripts/vtzero/2915725/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} \ No newline at end of file diff --git a/scripts/vtzero/2915725/script.sh b/scripts/vtzero/2915725/script.sh new file mode 100644 index 000000000..085e22fe5 --- /dev/null +++ b/scripts/vtzero/2915725/script.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +MASON_NAME=vtzero +MASON_VERSION=2915725 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/vtzero/tarball/${MASON_VERSION} \ + 0636efe9f367b7b01f26a67571b4151f191295ca + + mason_extract_tar_gz + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapbox-vtzero-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/vtzero ${MASON_PREFIX}/include/vtzero +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/vtzero/7222daa/.travis.yml b/scripts/vtzero/7222daa/.travis.yml new file mode 100644 index 000000000..db27cb38e --- /dev/null +++ b/scripts/vtzero/7222daa/.travis.yml @@ -0,0 +1,12 @@ +language: generic + +matrix: + include: + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/vtzero/7222daa/script.sh b/scripts/vtzero/7222daa/script.sh new file mode 100644 index 000000000..25c6a858e --- /dev/null +++ b/scripts/vtzero/7222daa/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=vtzero +MASON_VERSION=7222daa +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/vtzero/tarball/${MASON_VERSION} \ + eba49cdfd95ed707bc87474e6f8c0cc02aebf12b + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/mapbox-vtzero-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/vtzero ${MASON_PREFIX}/include/vtzero +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/wagyu/0.5.0/.travis.yml b/scripts/wagyu/0.5.0/.travis.yml new file mode 100644 index 000000000..fcc58088c --- /dev/null +++ b/scripts/wagyu/0.5.0/.travis.yml @@ -0,0 +1,8 @@ +language: generic +sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/wagyu/0.5.0/script.sh b/scripts/wagyu/0.5.0/script.sh new file mode 100644 index 000000000..f1446e462 --- /dev/null +++ b/scripts/wagyu/0.5.0/script.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +MASON_NAME=wagyu +MASON_VERSION=0.5.0 +MASON_HEADER_ONLY=true + +. 
${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/mapbox/wagyu/archive/${MASON_VERSION}.tar.gz \ + a9ea7d358f667c0542ea97c7b1b5a8b4eb2f8acf + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/wagyu-${MASON_VERSION} +} + +function mason_compile { + mkdir -p ${MASON_PREFIX}/include/ + cp -r include/mapbox ${MASON_PREFIX}/include/mapbox +} + +function mason_cflags { + echo "-I${MASON_PREFIX}/include" +} + +function mason_ldflags { + : +} + + +mason_run "$@" diff --git a/scripts/webp/0.4.2/script.sh b/scripts/webp/0.4.2/script.sh index e38e09fd0..b487b23c5 100755 --- a/scripts/webp/0.4.2/script.sh +++ b/scripts/webp/0.4.2/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libwebp.pc function mason_load_source { mason_download \ - http://downloads.webmproject.org/releases/webp/libwebp-0.4.2.tar.gz \ + https://downloads.webmproject.org/releases/webp/libwebp-0.4.2.tar.gz \ fdc496dcbcb03c9f26c2d9ce771545fa557a40c8 mason_extract_tar_gz diff --git a/scripts/webp/0.5.0/script.sh b/scripts/webp/0.5.0/script.sh index 868fc682a..310abb58a 100755 --- a/scripts/webp/0.5.0/script.sh +++ b/scripts/webp/0.5.0/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libwebp.pc function mason_load_source { mason_download \ - http://downloads.webmproject.org/releases/webp/libwebp-$MASON_VERSION.tar.gz \ + https://downloads.webmproject.org/releases/webp/libwebp-$MASON_VERSION.tar.gz \ 9e5a4130ff09d28f0217eba972dcca5a57525f03 mason_extract_tar_gz diff --git a/scripts/webp/0.5.1/script.sh b/scripts/webp/0.5.1/script.sh index 557877349..3dc2fb2ae 100755 --- a/scripts/webp/0.5.1/script.sh +++ b/scripts/webp/0.5.1/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libwebp.pc function mason_load_source { mason_download \ - http://downloads.webmproject.org/releases/webp/libwebp-$MASON_VERSION.tar.gz \ + https://downloads.webmproject.org/releases/webp/libwebp-$MASON_VERSION.tar.gz \ 
7c2350c6524e8419e6b541a9087607c91c957377 mason_extract_tar_gz diff --git a/scripts/webp/0.6.0/script.sh b/scripts/webp/0.6.0/script.sh index 76b24c681..c07bf29d7 100755 --- a/scripts/webp/0.6.0/script.sh +++ b/scripts/webp/0.6.0/script.sh @@ -9,7 +9,7 @@ MASON_PKGCONFIG_FILE=lib/pkgconfig/libwebp.pc function mason_load_source { mason_download \ - http://downloads.webmproject.org/releases/webp/libwebp-$MASON_VERSION.tar.gz \ + https://downloads.webmproject.org/releases/webp/libwebp-$MASON_VERSION.tar.gz \ 7669dc9a7c110cafc9e352d3abc1753bcb465dc0 mason_extract_tar_gz diff --git a/scripts/wget/1.19.2/script.sh b/scripts/wget/1.19.2/script.sh index 884512663..66babbb60 100644 --- a/scripts/wget/1.19.2/script.sh +++ b/scripts/wget/1.19.2/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/wget function mason_load_source { mason_download \ - http://ftp.gnu.org/gnu/${MASON_NAME}/${MASON_NAME}-${MASON_VERSION}.tar.gz \ + https://ftp.gnu.org/gnu/${MASON_NAME}/${MASON_NAME}-${MASON_VERSION}.tar.gz \ 07a689125eaf3b050cd62fcb98662eeddc4982db mason_extract_tar_gz diff --git a/scripts/xz/5.2.3/script.sh b/scripts/xz/5.2.3/script.sh index e34434759..2e21c8256 100755 --- a/scripts/xz/5.2.3/script.sh +++ b/scripts/xz/5.2.3/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=lib/liblzma.a function mason_load_source { mason_download \ - http://tukaani.org/xz/xz-${MASON_VERSION}.tar.gz \ + https://tukaani.org/xz/xz-${MASON_VERSION}.tar.gz \ 147ce202755a3d846dc17479999671c7cadf0c2f mason_extract_tar_gz diff --git a/scripts/zip/3.0.0/script.sh b/scripts/zip/3.0.0/script.sh index 05c8c645d..13db00b8c 100755 --- a/scripts/zip/3.0.0/script.sh +++ b/scripts/zip/3.0.0/script.sh @@ -8,7 +8,7 @@ MASON_LIB_FILE=bin/zip function mason_load_source { mason_download \ - http://sourceforge.net/projects/infozip/files/Zip%203.x%20%28latest%29/3.0/zip30.tar.gz/download \ + https://sourceforge.net/projects/infozip/files/Zip%203.x%20%28latest%29/3.0/zip30.tar.gz/download \ 57f60be499bef90ccf84fe47d522d32504609e9b 
mason_extract_tar_gz diff --git a/scripts/zlib-cloudflare/e55212b/.travis.yml b/scripts/zlib-cloudflare/e55212b/.travis.yml new file mode 100644 index 000000000..189b6709f --- /dev/null +++ b/scripts/zlib-cloudflare/e55212b/.travis.yml @@ -0,0 +1,13 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/zlib-cloudflare/e55212b/script.sh b/scripts/zlib-cloudflare/e55212b/script.sh new file mode 100755 index 000000000..053cade23 --- /dev/null +++ b/scripts/zlib-cloudflare/e55212b/script.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +MASON_NAME=zlib-cloudflare +MASON_VERSION=e55212b +MASON_LIB_FILE=lib/libz.a +MASON_PKGCONFIG_FILE=lib/pkgconfig/zlib.pc + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/cloudflare/zlib/tarball/e55212b \ + a76b4cce9dbbe578b72081e92e3c6488ee6d5872 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/cloudflare-zlib-e55212b +} + +function mason_compile { + # Add optimization flags since CFLAGS overrides the default (-g -O2) + export CFLAGS="${CFLAGS} -O3 -DNDEBUG" + ./configure \ + --prefix=${MASON_PREFIX} \ + --shared + make install -j${MASON_CONCURRENCY} +} + +mason_run "$@" diff --git a/scripts/zlib-ng/013b23b/.travis.yml b/scripts/zlib-ng/013b23b/.travis.yml new file mode 100644 index 000000000..189b6709f --- /dev/null +++ b/scripts/zlib-ng/013b23b/.travis.yml @@ -0,0 +1,13 @@ +language: generic + +matrix: + include: + - os: osx + - os: linux + sudo: false + +script: +- ./mason build ${MASON_NAME} ${MASON_VERSION} + +after_success: +- ./mason publish ${MASON_NAME} ${MASON_VERSION} diff --git a/scripts/zlib-ng/013b23b/script.sh b/scripts/zlib-ng/013b23b/script.sh new file mode 100755 index 000000000..5e361def8 --- /dev/null +++ b/scripts/zlib-ng/013b23b/script.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env 
bash + +MASON_NAME=zlib-ng +MASON_VERSION=013b23b +MASON_LIB_FILE=lib/libz.a +MASON_PKGCONFIG_FILE=lib/pkgconfig/zlib.pc + +. ${MASON_DIR}/mason.sh + +function mason_load_source { + mason_download \ + https://github.com/Dead2/zlib-ng/tarball/013b23b \ + 873a7c61470786f87917e2b98bc87cd9fa6b8bf8 + + mason_extract_tar_gz + + export MASON_BUILD_PATH=${MASON_ROOT}/.build/Dead2-zlib-ng-013b23b +} + +function mason_compile { + # Add optimization flags since CFLAGS overrides the default (-g -O2) + export CFLAGS="${CFLAGS} -O3 -DNDEBUG" + ./configure \ + --prefix=${MASON_PREFIX} \ + --shared \ + --zlib-compat --64 + make install -j${MASON_CONCURRENCY} +} + +mason_run "$@" diff --git a/test/c_build.sh b/test/c_build.sh index 973fee8b3..467353925 100755 --- a/test/c_build.sh +++ b/test/c_build.sh @@ -3,8 +3,7 @@ set -e -u set -o pipefail -mason build expat 2.1.1 -mason build expat 2.2.0 +mason build expat 2.3.0 mason build libzip 1.1.3 mason build zlib 1.2.8 -mason build libuv 0.11.29 \ No newline at end of file +mason build libuv 0.11.29 diff --git a/utils/llvm.sh b/utils/llvm.sh index a5f99462b..121d10fa5 100755 --- a/utils/llvm.sh +++ b/utils/llvm.sh @@ -30,7 +30,7 @@ function usage() { echo "See scripts/llvm/base/README.md for more details" } -subpackages=(clang++ clang-tidy clang-format lldb llvm-cov include-what-you-use) +subpackages=(clang++ clang-tidy clang-format lldb llvm-cov) function build() { local VERSION=$1 @@ -54,11 +54,11 @@ function create() { echo "ERROR: please provide first arg of new version" exit 1 fi - if [[ -d ./scripts/llvm/${1} ]]; then + if [[ -d ./scripts/llvm/${1} ]] && [[ ${FORCE_LLVM_OVERWRITE:-false} != 1 ]]; then usage echo echo - echo "ERROR: first arg must point to a version of llvm that does not exist" + echo "ERROR: first arg must point to a version of llvm that does not exist (or pass 'FORCE_LLVM_OVERWRITE=1 ./utils/llvm.sh create'" exit 1 fi if [[ ! 
-d ./scripts/llvm/${2} ]]; then diff --git a/utils/new_boost.sh b/utils/new_boost.sh index 4cbc60b3c..a351580c7 100755 --- a/utils/new_boost.sh +++ b/utils/new_boost.sh @@ -1,7 +1,7 @@ set -eu set -o pipefail -: ' +: ' manual intervention: @@ -64,27 +64,28 @@ function create() { mkdir -p scripts/boost/${NEW_VERSION} cp -r scripts/boost/${LAST_VERSION}/. scripts/boost/${NEW_VERSION}/ - perl -i -p -e "s/MASON_VERSION=${LAST_VERSION}/MASON_VERSION=${NEW_VERSION}/g;" scripts/boost/${NEW_VERSION}/base.sh + perl -i -p -e "s/MASON_VERSION=${LAST_VERSION}/MASON_VERSION=${NEW_VERSION}/g;" scripts/boost/${NEW_VERSION}/base.sh export BOOST_VERSION=${NEW_VERSION//./_} export CACHE_PATH="mason_packages/.cache" mkdir -p "${CACHE_PATH}" if [[ ! -f ${CACHE_PATH}/boost-${NEW_VERSION} ]]; then - curl --retry 3 -f -S -L http://downloads.sourceforge.net/project/boost/boost/${NEW_VERSION}/boost_${BOOST_VERSION}.tar.bz2 -o ${CACHE_PATH}/boost-${NEW_VERSION} + curl --retry 3 -f -S -L https://dl.bintray.com/boostorg/release/${NEW_VERSION}/source/boost_${BOOST_VERSION}.tar.bz2 -o ${CACHE_PATH}/boost-${NEW_VERSION} fi NEW_SHASUM=$(git hash-object ${CACHE_PATH}/boost-${NEW_VERSION}) - perl -i -p -e "s/BOOST_SHASUM=(.*)/BOOST_SHASUM=${NEW_SHASUM}/g;" scripts/boost/${NEW_VERSION}/base.sh + perl -i -p -e "s/BOOST_SHASUM=(.*)/BOOST_SHASUM=${NEW_SHASUM}/g;" scripts/boost/${NEW_VERSION}/base.sh - for lib in $(find scripts/ -maxdepth 1 -type dir -name 'boost_lib*' -print); do + for lib in $(find scripts/ -maxdepth 1 -type d -name 'boost_lib*' -print); do if [[ -d $lib/${LAST_VERSION} ]]; then if [[ ${CLEAN} ]]; then rm -rf $lib/${NEW_VERSION} fi mkdir $lib/${NEW_VERSION} + echo "creating $lib/${NEW_VERSION}/" cp -r $lib/${LAST_VERSION}/. 
$lib/${NEW_VERSION}/ else - echo "skipping creating package for $lib" + echo "skipping creating package for $lib/${LAST_VERSION}" fi done } @@ -109,8 +110,12 @@ function trigger() { fi NEW_VERSION=${1} ./mason trigger boost ${NEW_VERSION} - for lib in $(find scripts/ -maxdepth 1 -type dir -name 'boost_lib*' -print); do - ./mason trigger $(basename $lib) ${NEW_VERSION} + for lib in $(find scripts/ -maxdepth 1 -type d -name 'boost_lib*' -print); do + if [[ -d $lib/${NEW_VERSION} ]]; then + ./mason trigger $(basename $lib) ${NEW_VERSION} + else + echo "skipping creating package for $lib/${NEW_VERSION}" + fi done } @@ -125,5 +130,3 @@ else usage exit 1 fi - - diff --git a/utils/toolchain.sh b/utils/toolchain.sh index 387ce7e91..f02467b51 100644 --- a/utils/toolchain.sh +++ b/utils/toolchain.sh @@ -3,7 +3,7 @@ set -eu set -o pipefail -CLANG_VERSION="5.0.0" +CLANG_VERSION="10.0.0" ./mason install clang++ ${CLANG_VERSION} export PATH=$(./mason prefix clang++ ${CLANG_VERSION})/bin:${PATH} export CXX=clang++