Description: update vendored crates after applying upstream de26c4d8b192ed0224e6d38f54e429838608b902
Author: Olivier Tilloy
--- /dev/null
+++ b/vendor/autocfg/Cargo.lock
@@ -0,0 +1,6 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "autocfg"
+version = "0.1.6"
+
--- /dev/null
+++ b/vendor/autocfg/Cargo.toml
@@ -0,0 +1,24 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "autocfg"
+version = "0.1.6"
+authors = ["Josh Stone <cuviper@gmail.com>"]
+description = "Automatic cfg for Rust compiler features"
+readme = "README.md"
+keywords = ["rustc", "build", "autoconf"]
+categories = ["development-tools::build-utils"]
+license = "Apache-2.0/MIT"
+repository = "https://github.com/cuviper/autocfg"
+
+[dependencies]
--- /dev/null
+++ b/vendor/autocfg/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. --- /dev/null +++ b/vendor/autocfg/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2018 Josh Stone + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. --- /dev/null +++ b/vendor/autocfg/README.md @@ -0,0 +1,81 @@ +autocfg +======= + +[![autocfg crate](https://img.shields.io/crates/v/autocfg.svg)](https://crates.io/crates/autocfg) +[![autocfg documentation](https://docs.rs/autocfg/badge.svg)](https://docs.rs/autocfg) +![minimum rustc 1.0](https://img.shields.io/badge/rustc-1.0+-red.svg) +[![Travis Status](https://travis-ci.org/cuviper/autocfg.svg?branch=master)](https://travis-ci.org/cuviper/autocfg) + +A Rust library for build scripts to automatically configure code based on +compiler support. Code snippets are dynamically tested to see if the `rustc` +will accept them, rather than hard-coding specific version support. + + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[build-dependencies] +autocfg = "0.1" +``` + +Then use it in your `build.rs` script to detect compiler features. For +example, to test for 128-bit integer support, it might look like: + +```rust +extern crate autocfg; + +fn main() { + let ac = autocfg::new(); + ac.emit_has_type("i128"); + + // (optional) We don't need to rerun for anything external. + autocfg::rerun_path(file!()); +} +``` + +If the type test succeeds, this will write a `cargo:rustc-cfg=has_i128` line +for Cargo, which translates to Rust arguments `--cfg has_i128`. Then in the +rest of your Rust code, you can add `#[cfg(has_i128)]` conditions on code that +should only be used when the compiler supports it. + + +## Release Notes + +- 0.1.6 (2019-08-19) + - Add `probe`/`emit_sysroot_crate`, by @leo60228 + +- 0.1.5 (2019-07-16) + - Mask some warnings from newer rustc. + +- 0.1.4 (2019-05-22) + - Relax `std`/`no_std` probing to a warning instead of an error. + - Improve `rustc` bootstrap compatibility. + +- 0.1.3 (2019-05-21) + - Auto-detects if `#![no_std]` is needed for the `$TARGET` + +- 0.1.2 (2019-01-16) + - Add `rerun_env(ENV)` to print `cargo:rerun-if-env-changed=ENV` + - Add `rerun_path(PATH)` to print `cargo:rerun-if-changed=PATH` + + +## Minimum Rust version policy + +This crate's minimum supported `rustc` version is `1.0.0`. Compatibility is +its entire reason for existence, so this crate will be extremely conservative +about raising this requirement. If this is ever deemed necessary, it will be +treated as a major breaking change for semver purposes. + + +## License + +This project is licensed under either of + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or + http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or + http://opensource.org/licenses/MIT) + +at your option. --- /dev/null +++ b/vendor/autocfg/examples/integers.rs @@ -0,0 +1,9 @@ +extern crate autocfg; + +fn main() { + // Normally, cargo will set `OUT_DIR` for build scripts. + let ac = autocfg::AutoCfg::with_dir("target").unwrap(); + for i in 3..8 { + ac.emit_has_type(&format!("i{}", 1 << i)); + } +} --- /dev/null +++ b/vendor/autocfg/examples/paths.rs @@ -0,0 +1,22 @@ +extern crate autocfg; + +fn main() { + // Normally, cargo will set `OUT_DIR` for build scripts. + let ac = autocfg::AutoCfg::with_dir("target").unwrap(); + + // since ancient times... 
+
+ ac.emit_has_path("std::vec::Vec");
+ ac.emit_path_cfg("std::vec::Vec", "has_vec");
+
+ // rustc 1.10.0
+ ac.emit_has_path("std::panic::PanicInfo");
+ ac.emit_path_cfg("std::panic::PanicInfo", "has_panic_info");
+
+ // rustc 1.20.0
+ ac.emit_has_path("std::mem::ManuallyDrop");
+ ac.emit_path_cfg("std::mem::ManuallyDrop", "has_manually_drop");
+
+ // rustc 1.25.0
+ ac.emit_has_path("std::ptr::NonNull");
+ ac.emit_path_cfg("std::ptr::NonNull", "has_non_null");
+}
--- /dev/null
+++ b/vendor/autocfg/examples/traits.rs
@@ -0,0 +1,26 @@
+extern crate autocfg;
+
+fn main() {
+ // Normally, cargo will set `OUT_DIR` for build scripts.
+ let ac = autocfg::AutoCfg::with_dir("target").unwrap();
+
+ // since ancient times...
+ ac.emit_has_trait("std::ops::Add");
+ ac.emit_trait_cfg("std::ops::Add", "has_ops");
+
+ // trait parameters have to be provided
+ ac.emit_has_trait("std::borrow::Borrow<str>");
+ ac.emit_trait_cfg("std::borrow::Borrow<str>", "has_borrow");
+
+ // rustc 1.8.0
+ ac.emit_has_trait("std::ops::AddAssign");
+ ac.emit_trait_cfg("std::ops::AddAssign", "has_assign_ops");
+
+ // rustc 1.12.0
+ ac.emit_has_trait("std::iter::Sum");
+ ac.emit_trait_cfg("std::iter::Sum", "has_sum");
+
+ // rustc 1.28.0
+ ac.emit_has_trait("std::alloc::GlobalAlloc");
+ ac.emit_trait_cfg("std::alloc::GlobalAlloc", "has_global_alloc");
+}
--- /dev/null
+++ b/vendor/autocfg/examples/versions.rs
@@ -0,0 +1,9 @@
+extern crate autocfg;
+
+fn main() {
+ // Normally, cargo will set `OUT_DIR` for build scripts.
+ let ac = autocfg::AutoCfg::with_dir("target").unwrap();
+ for i in 0..100 {
+ ac.emit_rustc_version(1, i);
+ }
+}
--- /dev/null
+++ b/vendor/autocfg/src/error.rs
@@ -0,0 +1,69 @@
+use std::error;
+use std::fmt;
+use std::io;
+use std::num;
+use std::str;
+
+/// A common error type for the `autocfg` crate.
+#[derive(Debug)]
+pub struct Error {
+ kind: ErrorKind,
+}
+
+impl error::Error for Error {
+ fn description(&self) -> &str {
+ "AutoCfg error"
+ }
+
+ fn cause(&self) -> Option<&error::Error> {
+ match self.kind {
+ ErrorKind::Io(ref e) => Some(e),
+ ErrorKind::Num(ref e) => Some(e),
+ ErrorKind::Utf8(ref e) => Some(e),
+ ErrorKind::Other(_) => None,
+ }
+ }
+}
+
+impl fmt::Display for Error {
+ fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ match self.kind {
+ ErrorKind::Io(ref e) => e.fmt(f),
+ ErrorKind::Num(ref e) => e.fmt(f),
+ ErrorKind::Utf8(ref e) => e.fmt(f),
+ ErrorKind::Other(s) => s.fmt(f),
+ }
+ }
+}
+
+#[derive(Debug)]
+enum ErrorKind {
+ Io(io::Error),
+ Num(num::ParseIntError),
+ Utf8(str::Utf8Error),
+ Other(&'static str),
+}
+
+pub fn from_io(e: io::Error) -> Error {
+ Error {
+ kind: ErrorKind::Io(e),
+ }
+}
+
+pub fn from_num(e: num::ParseIntError) -> Error {
+ Error {
+ kind: ErrorKind::Num(e),
+ }
+}
+
+pub fn from_utf8(e: str::Utf8Error) -> Error {
+ Error {
+ kind: ErrorKind::Utf8(e),
+ }
+}
+
+pub fn from_str(s: &'static str) -> Error {
+ Error {
+ kind: ErrorKind::Other(s),
+ }
+}
--- /dev/null
+++ b/vendor/autocfg/src/lib.rs
@@ -0,0 +1,328 @@
+//! A Rust library for build scripts to automatically configure code based on
+//! compiler support. Code snippets are dynamically tested to see if the `rustc`
+//! will accept them, rather than hard-coding specific version support.
+//!
+//!
+//! ## Usage
+//!
+//! Add this to your `Cargo.toml`:
+//!
+//! ```toml
+//! [build-dependencies]
+//! autocfg = "0.1"
+//! ```
+//!
+//! Then use it in your `build.rs` script to detect compiler features. For
+//! example, to test for 128-bit integer support, it might look like:
+//!
+//!
```rust
+//! extern crate autocfg;
+//!
+//! fn main() {
+//! # // Normally, cargo will set `OUT_DIR` for build scripts.
+//! # std::env::set_var("OUT_DIR", "target");
+//! let ac = autocfg::new();
+//! ac.emit_has_type("i128");
+//!
+//! // (optional) We don't need to rerun for anything external.
+//! autocfg::rerun_path(file!());
+//! }
+//! ```
+//!
+//! If the type test succeeds, this will write a `cargo:rustc-cfg=has_i128` line
+//! for Cargo, which translates to Rust arguments `--cfg has_i128`. Then in the
+//! rest of your Rust code, you can add `#[cfg(has_i128)]` conditions on code that
+//! should only be used when the compiler supports it.
+
+#![deny(missing_debug_implementations)]
+#![deny(missing_docs)]
+// allow future warnings that can't be fixed while keeping 1.0 compatibility
+#![allow(unknown_lints)]
+#![allow(bare_trait_objects)]
+#![allow(ellipsis_inclusive_range_patterns)]
+
+use std::env;
+use std::ffi::OsString;
+use std::fs;
+use std::io::{stderr, Write};
+use std::path::PathBuf;
+use std::process::{Command, Stdio};
+#[allow(deprecated)]
+use std::sync::atomic::ATOMIC_USIZE_INIT;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+mod error;
+pub use error::Error;
+
+mod version;
+use version::Version;
+
+#[cfg(test)]
+mod tests;
+
+/// Helper to detect compiler features for `cfg` output in build scripts.
+#[derive(Clone, Debug)]
+pub struct AutoCfg {
+ out_dir: PathBuf,
+ rustc: PathBuf,
+ rustc_version: Version,
+ target: Option<OsString>,
+ no_std: bool,
+}
+
+/// Writes a config flag for rustc on standard out.
+///
+/// This looks like: `cargo:rustc-cfg=CFG`
+///
+/// Cargo will use this in arguments to rustc, like `--cfg CFG`.
+pub fn emit(cfg: &str) {
+ println!("cargo:rustc-cfg={}", cfg);
+}
+
+/// Writes a line telling Cargo to rerun the build script if `path` changes.
+///
+/// This looks like: `cargo:rerun-if-changed=PATH`
+///
+/// This requires at least cargo 0.7.0, corresponding to rustc 1.6.0. Earlier
+/// versions of cargo will simply ignore the directive.
+pub fn rerun_path(path: &str) {
+ println!("cargo:rerun-if-changed={}", path);
+}
+
+/// Writes a line telling Cargo to rerun the build script if the environment
+/// variable `var` changes.
+///
+/// This looks like: `cargo:rerun-if-env-changed=VAR`
+///
+/// This requires at least cargo 0.21.0, corresponding to rustc 1.20.0. Earlier
+/// versions of cargo will simply ignore the directive.
+pub fn rerun_env(var: &str) {
+ println!("cargo:rerun-if-env-changed={}", var);
+}
+
+/// Create a new `AutoCfg` instance.
+///
+/// # Panics
+///
+/// Panics if `AutoCfg::new()` returns an error.
+pub fn new() -> AutoCfg {
+ AutoCfg::new().unwrap()
+}
+
+impl AutoCfg {
+ /// Create a new `AutoCfg` instance.
+ ///
+ /// # Common errors
+ ///
+ /// - `rustc` can't be executed, from `RUSTC` or in the `PATH`.
+ /// - The version output from `rustc` can't be parsed.
+ /// - `OUT_DIR` is not set in the environment, or is not a writable directory.
+ ///
+ pub fn new() -> Result<Self, Error> {
+ match env::var_os("OUT_DIR") {
+ Some(d) => Self::with_dir(d),
+ None => Err(error::from_str("no OUT_DIR specified!")),
+ }
+ }
+
+ /// Create a new `AutoCfg` instance with the specified output directory.
+ ///
+ /// # Common errors
+ ///
+ /// - `rustc` can't be executed, from `RUSTC` or in the `PATH`.
+ /// - The version output from `rustc` can't be parsed.
+ /// - `dir` is not a writable directory.
+ ///
+ pub fn with_dir<T: Into<PathBuf>>(dir: T) -> Result<Self, Error> {
+ let rustc = env::var_os("RUSTC").unwrap_or_else(|| "rustc".into());
+ let rustc: PathBuf = rustc.into();
+ let rustc_version = try!(Version::from_rustc(&rustc));
+
+ // Sanity check the output directory
+ let dir = dir.into();
+ let meta = try!(fs::metadata(&dir).map_err(error::from_io));
+ if !meta.is_dir() || meta.permissions().readonly() {
+ return Err(error::from_str("output path is not a writable directory"));
+ }
+
+ let mut ac = AutoCfg {
+ out_dir: dir,
+ rustc: rustc,
+ rustc_version: rustc_version,
+ target: env::var_os("TARGET"),
+ no_std: false,
+ };
+
+ // Sanity check with and without `std`.
+ if !ac.probe("").unwrap_or(false) {
+ ac.no_std = true;
+ if !ac.probe("").unwrap_or(false) {
+ // Neither worked, so assume nothing...
+ ac.no_std = false;
+ let warning = b"warning: autocfg could not probe for `std`\n";
+ stderr().write_all(warning).ok();
+ }
+ }
+ Ok(ac)
+ }
+
+ /// Test whether the current `rustc` reports a version greater than
+ /// or equal to "`major`.`minor`".
+ pub fn probe_rustc_version(&self, major: usize, minor: usize) -> bool {
+ self.rustc_version >= Version::new(major, minor, 0)
+ }
+
+ /// Sets a `cfg` value of the form `rustc_major_minor`, like `rustc_1_29`,
+ /// if the current `rustc` is at least that version.
+ pub fn emit_rustc_version(&self, major: usize, minor: usize) {
+ if self.probe_rustc_version(major, minor) {
+ emit(&format!("rustc_{}_{}", major, minor));
+ }
+ }
+
+ fn probe<T: AsRef<[u8]>>(&self, code: T) -> Result<bool, Error> {
+ #[allow(deprecated)]
+ static ID: AtomicUsize = ATOMIC_USIZE_INIT;
+
+ let id = ID.fetch_add(1, Ordering::Relaxed);
+ let mut command = Command::new(&self.rustc);
+ command
+ .arg("--crate-name")
+ .arg(format!("probe{}", id))
+ .arg("--crate-type=lib")
+ .arg("--out-dir")
+ .arg(&self.out_dir)
+ .arg("--emit=llvm-ir");
+
+ if let Some(target) = self.target.as_ref() {
+ command.arg("--target").arg(target);
+ }
+
+ command.arg("-").stdin(Stdio::piped());
+ let mut child = try!(command.spawn().map_err(error::from_io));
+ let mut stdin = child.stdin.take().expect("rustc stdin");
+
+ if self.no_std {
+ try!(stdin.write_all(b"#![no_std]\n").map_err(error::from_io));
+ }
+ try!(stdin.write_all(code.as_ref()).map_err(error::from_io));
+ drop(stdin);
+
+ let status = try!(child.wait().map_err(error::from_io));
+ Ok(status.success())
+ }
+
+ /// Tests whether the given sysroot crate can be used.
+ ///
+ /// The test code is subject to change, but currently looks like:
+ ///
+ /// ```ignore
+ /// extern crate CRATE as probe;
+ /// ```
+ pub fn probe_sysroot_crate(&self, name: &str) -> bool {
+ self.probe(format!("extern crate {} as probe;", name)) // `as _` wasn't stabilized until Rust 1.33
+ .unwrap_or(false)
+ }
+
+ /// Emits a config value `has_CRATE` if `probe_sysroot_crate` returns true.
+ pub fn emit_sysroot_crate(&self, name: &str) {
+ if self.probe_sysroot_crate(name) {
+ emit(&format!("has_{}", mangle(name)));
+ }
+ }
+
+ /// Tests whether the given path can be used.
+ ///
+ /// The test code is subject to change, but currently looks like:
+ ///
+ /// ```ignore
+ /// pub use PATH;
+ /// ```
+ pub fn probe_path(&self, path: &str) -> bool {
+ self.probe(format!("pub use {};", path)).unwrap_or(false)
+ }
+
+ /// Emits a config value `has_PATH` if `probe_path` returns true.
+ ///
+ /// Any non-identifier characters in the `path` will be replaced with
+ /// `_` in the generated config value.
+ pub fn emit_has_path(&self, path: &str) { + if self.probe_path(path) { + emit(&format!("has_{}", mangle(path))); + } + } + + /// Emits the given `cfg` value if `probe_path` returns true. + pub fn emit_path_cfg(&self, path: &str, cfg: &str) { + if self.probe_path(path) { + emit(cfg); + } + } + + /// Tests whether the given trait can be used. + /// + /// The test code is subject to change, but currently looks like: + /// + /// ```ignore + /// pub trait Probe: TRAIT + Sized {} + /// ``` + pub fn probe_trait(&self, name: &str) -> bool { + self.probe(format!("pub trait Probe: {} + Sized {{}}", name)) + .unwrap_or(false) + } + + /// Emits a config value `has_TRAIT` if `probe_trait` returns true. + /// + /// Any non-identifier characters in the trait `name` will be replaced with + /// `_` in the generated config value. + pub fn emit_has_trait(&self, name: &str) { + if self.probe_trait(name) { + emit(&format!("has_{}", mangle(name))); + } + } + + /// Emits the given `cfg` value if `probe_trait` returns true. + pub fn emit_trait_cfg(&self, name: &str, cfg: &str) { + if self.probe_trait(name) { + emit(cfg); + } + } + + /// Tests whether the given type can be used. + /// + /// The test code is subject to change, but currently looks like: + /// + /// ```ignore + /// pub type Probe = TYPE; + /// ``` + pub fn probe_type(&self, name: &str) -> bool { + self.probe(format!("pub type Probe = {};", name)) + .unwrap_or(false) + } + + /// Emits a config value `has_TYPE` if `probe_type` returns true. + /// + /// Any non-identifier characters in the type `name` will be replaced with + /// `_` in the generated config value. + pub fn emit_has_type(&self, name: &str) { + if self.probe_type(name) { + emit(&format!("has_{}", mangle(name))); + } + } + + /// Emits the given `cfg` value if `probe_type` returns true. 
+ pub fn emit_type_cfg(&self, name: &str, cfg: &str) {
+ if self.probe_type(name) {
+ emit(cfg);
+ }
+ }
+}
+
+fn mangle(s: &str) -> String {
+ s.chars()
+ .map(|c| match c {
+ 'A'...'Z' | 'a'...'z' | '0'...'9' => c,
+ _ => '_',
+ })
+ .collect()
+}
--- /dev/null
+++ b/vendor/autocfg/src/tests.rs
@@ -0,0 +1,99 @@
+use super::AutoCfg;
+
+impl AutoCfg {
+ fn core_std(&self, path: &str) -> String {
+ let krate = if self.no_std { "core" } else { "std" };
+ format!("{}::{}", krate, path)
+ }
+}
+
+#[test]
+fn autocfg_version() {
+ let ac = AutoCfg::with_dir("target").unwrap();
+ println!("version: {:?}", ac.rustc_version);
+ assert!(ac.probe_rustc_version(1, 0));
+}
+
+#[test]
+fn version_cmp() {
+ use super::version::Version;
+ let v123 = Version::new(1, 2, 3);
+
+ assert!(Version::new(1, 0, 0) < v123);
+ assert!(Version::new(1, 2, 2) < v123);
+ assert!(Version::new(1, 2, 3) == v123);
+ assert!(Version::new(1, 2, 4) > v123);
+ assert!(Version::new(1, 10, 0) > v123);
+ assert!(Version::new(2, 0, 0) > v123);
+}
+
+#[test]
+fn probe_add() {
+ let ac = AutoCfg::with_dir("target").unwrap();
+ let add = ac.core_std("ops::Add");
+ let add_rhs = ac.core_std("ops::Add<i32>");
+ let add_rhs_output = ac.core_std("ops::Add<i32, Output = i32>");
+ assert!(ac.probe_path(&add));
+ assert!(ac.probe_trait(&add));
+ assert!(ac.probe_trait(&add_rhs));
+ assert!(ac.probe_trait(&add_rhs_output));
+ assert!(ac.probe_type(&add_rhs_output));
+}
+
+#[test]
+fn probe_as_ref() {
+ let ac = AutoCfg::with_dir("target").unwrap();
+ let as_ref = ac.core_std("convert::AsRef");
+ let as_ref_str = ac.core_std("convert::AsRef<str>");
+ assert!(ac.probe_path(&as_ref));
+ assert!(ac.probe_trait(&as_ref_str));
+ assert!(ac.probe_type(&as_ref_str));
+}
+
+#[test]
+fn probe_i128() {
+ let ac = AutoCfg::with_dir("target").unwrap();
+ let missing = !ac.probe_rustc_version(1, 26);
+ let i128_path = ac.core_std("i128");
+ assert!(missing ^ ac.probe_path(&i128_path));
+ assert!(missing ^ ac.probe_type("i128"));
+}
+
+#[test]
+fn probe_sum() {
+ let ac = AutoCfg::with_dir("target").unwrap();
+ let missing = !ac.probe_rustc_version(1, 12);
+ let sum = ac.core_std("iter::Sum");
+ let sum_i32 = ac.core_std("iter::Sum<i32>");
+ assert!(missing ^ ac.probe_path(&sum));
+ assert!(missing ^ ac.probe_trait(&sum));
+ assert!(missing ^ ac.probe_trait(&sum_i32));
+ assert!(missing ^ ac.probe_type(&sum_i32));
+}
+
+#[test]
+fn probe_std() {
+ let ac = AutoCfg::with_dir("target").unwrap();
+ assert_eq!(ac.probe_sysroot_crate("std"), !ac.no_std);
+}
+
+#[test]
+fn probe_alloc() {
+ let ac = AutoCfg::with_dir("target").unwrap();
+ let missing = !ac.probe_rustc_version(1, 36);
+ assert!(missing ^ ac.probe_sysroot_crate("alloc"));
+}
+
+#[test]
+fn probe_bad_sysroot_crate() {
+ let ac = AutoCfg::with_dir("target").unwrap();
+ assert!(!ac.probe_sysroot_crate("doesnt_exist"));
+}
+
+#[test]
+fn probe_no_std() {
+ let ac = AutoCfg::with_dir("target").unwrap();
+ assert!(ac.probe_type("i32"));
+ assert!(ac.probe_type("[i32]"));
+ assert_eq!(ac.probe_type("Vec<i32>"), !ac.no_std);
+}
--- /dev/null
+++ b/vendor/autocfg/src/version.rs
@@ -0,0 +1,60 @@
+use std::path::Path;
+use std::process::Command;
+use std::str;
+
+use super::{error, Error};
+
+/// A version structure for making relative comparisons.
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct Version {
+ major: usize,
+ minor: usize,
+ patch: usize,
+}
+
+impl Version {
+ /// Creates a `Version` instance for a specific `major.minor.patch` version.
+ pub fn new(major: usize, minor: usize, patch: usize) -> Self {
+ Version {
+ major: major,
+ minor: minor,
+ patch: patch,
+ }
+ }
+
+ pub fn from_rustc(rustc: &Path) -> Result<Self, Error> {
+ // Get rustc's verbose version
+ let output = try!(Command::new(rustc)
+ .args(&["--version", "--verbose"])
+ .output()
+ .map_err(error::from_io));
+ if !output.status.success() {
+ return Err(error::from_str("could not execute rustc"));
+ }
+ let output = try!(str::from_utf8(&output.stdout).map_err(error::from_utf8));
+
+ // Find the release line in the verbose version output.
+ let release = match output.lines().find(|line| line.starts_with("release: ")) {
+ Some(line) => &line["release: ".len()..],
+ None => return Err(error::from_str("could not find rustc release")),
+ };
+
+ // Strip off any extra channel info, e.g. "-beta.N", "-nightly"
+ let version = match release.find('-') {
+ Some(i) => &release[..i],
+ None => release,
+ };
+
+ // Split the version into semver components.
+ let mut iter = version.splitn(3, '.');
+ let major = try!(iter.next().ok_or(error::from_str("missing major version")));
+ let minor = try!(iter.next().ok_or(error::from_str("missing minor version")));
+ let patch = try!(iter.next().ok_or(error::from_str("missing patch version")));
+
+ Ok(Version::new(
+ try!(major.parse().map_err(error::from_num)),
+ try!(minor.parse().map_err(error::from_num)),
+ try!(patch.parse().map_err(error::from_num)),
+ ))
+ }
+}
--- a/vendor/cssparser/.cargo-checksum.json
+++ b/vendor/cssparser/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"99d0445140451d806afb253209d7fb144fe0879f52b2ba69da621237f8dd546b","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","README.md":"c5781e673335f37ed3d7acb119f8ed33efdf6eb75a7094b7da2abe0c3230adb8","build.rs":"ce686e87cccb6aa85a8cd34688d809398c5a624f179fd9a172d1049892da3f4c","build/match_byte.rs":"31905ae3dba69fa82c1f13069df4cd056bb340d59ee5d177679425f105f203cf","docs/404.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","docs/index.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","src/color.rs":"c60f1b0ab7a2a6213e434604ee33f78e7ef74347f325d86d0b9192d8225ae1cc","src/cow_rc_str.rs":"541216f8ef74ee3cc5cbbc1347e5f32ed66588c401851c9a7d68b867aede1de0","src/from_bytes.rs":"331fe63af2123ae3675b61928a69461b5ac77799fff3ce9978c55cf2c558f4ff","src/lib.rs":"a474ee88ef8f73fcb7b7272d426e5eafb4ad10d104797a5a188d1676c8180972","src/macros.rs":"adb9773c157890381556ea83d7942dcc676f99eea71abbb6afeffee1e3f28960","src/nth.rs":"5c70fb542d1376cddab69922eeb4c05e4fcf8f413f27563a2af50f72a47c8f8c","src/parser.rs":"9ed4aec998221eb2d2ba99db2f9f82a02399fb0c3b8500627f68f5aab872adde","src/rules_and_declarations.rs":"622ce07c117a511d40ce595602d4f4730659a59273388f28553d1a2b0fac92ce","src/serializer.rs":"3e2dfc60613f885cb6f99abfc854fde2a1e00de507431bd2e51178b61abfd69b","src/size_of_tests.rs":"e5f63c8c18721cc3ff7a5407e84f9889ffa10e66da96e8510a696c3e00ad72d5","src/tests.rs":"4a9223b9d2dc982144499aee497515553fc3d9ec86ca7b2e62b6caa5d4a11570","src/tokenizer.rs":"429b2cba419cf8b923fbcc32d3bd34c0b39284ebfcb9fc29b8eb8643d8d5f312","src/unicode_range.rs":"191d50a1588e5c88608b84cfe9279def71f495f8e016fa093f90399bbd2b635f"},"package":"495beddc39b1987b8e9f029354eccbd5ef88eb5f1cd24badb764dce338acf2e0"}
\ No newline at end of file
+{"files":{"Cargo.toml":"9b63e0b5efaef0d3eaf50854bb45a8dd559f6eae4f69640943b27de4e4597023","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","README.md":"a533b45a9c43083c6a6000a9c99a1acfed123d6430b232352ae02f1144a09f12","build.rs":"a41191c5917b37c911d4c191305ad975ad6d4c6cc778c38cfdf5c63c161aaffb","build/match_byte.rs":"190fa542a4a69ac197f9768320b03750cce2f61b3d99d84491d3c3116dafb461","docs/404.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","docs/index.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","src/color.rs":"af1c53f333cabde0736cc3e43e9045b0727eee6220eda7041af894abed1f0baa","src/cow_rc_str.rs":"89b5dff5cf80eef3fcff0c11799e54a978d02d8b8963a621fbb999d35e7c03a3","src/from_bytes.rs":"b1cf15c4e975523fef46b575598737a39f3c63e5ce0b2bfd6ec627c69c6ea54a","src/lib.rs":"980f4d339e77926eeb2c556f2716f6ed42b7aa38c3ce4ede7079ab67ca70a843","src/macros.rs":"a67ee23aace36d9f407c544b8b522b8180be04e239d1afdc9d849c5ca8022d51","src/nth.rs":"6896c997742af6f014b37ad57a5450c979054fd9c311fed29ec1ac42b2b256d6","src/parser.rs":"fe2eb2be084923bf362de4b95c029beb21f172ad972a6452c400f640b43a583e","src/rules_and_declarations.rs":"b0288def4392faad529296ea7850895470dce8322a712056c5232699688df67a","src/serializer.rs":"18c6e1533a2d84cf080444f034d3690c1a87c354609c5d336bb46d8b63448911","src/size_of_tests.rs":"a628cacc876f240ac1bb9e287cdae293bffc4b86d45d9307e4fc2f822e8f3e84","src/tests.rs":"b7391de644691e1a2d5af6c8c6f7575fa5895010a65c5ba5eae65376bd2c35af","src/tokenizer.rs":"7967940ff2970d6eef9a97feecf3c09f30c19b6d8a1f3e68594305a2dc59e85d","src/unicode_range.rs":"c4655c817db0dabb1d55669ac61a56ecf7f6a6c4353cf5b539b13bea6511c3dd"},"package":"fbe18ca4efb9ba3716c6da66cc3d7e673bf59fa576353011f48c4cfddbdd740e"} \ No newline at end of file --- a/vendor/cssparser/Cargo.toml +++ b/vendor/cssparser/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -12,7 +12,7 @@ [package] name = "cssparser" -version = "0.24.0" +version = "0.25.9" authors = ["Simon Sapin "] build = "build.rs" exclude = ["src/css-parsing-tests/**", "src/big-data-url.css"] @@ -54,18 +54,21 @@ version = "0.6" version = "2.0" [dev-dependencies.encoding_rs] -version = "0.7" +version = "0.8" + +[dev-dependencies.serde_json] +version = "1.0" +[build-dependencies.autocfg] +version = "0.1.4" -[dev-dependencies.rustc-serialize] -version = "0.3" [build-dependencies.proc-macro2] -version = "0.4" +version = "1" [build-dependencies.quote] -version = "0.6" +version = "1" [build-dependencies.syn] -version = "0.14" +version = "1" features = ["extra-traits", "fold", "full"] [features] --- a/vendor/cssparser/README.md +++ b/vendor/cssparser/README.md @@ -1,7 +1,7 @@ rust-cssparser ============== -[![Build Status](https://travis-ci.org/servo/rust-cssparser.svg?branch=travis)](https://travis-ci.org/servo/rust-cssparser) +[![Build Status](https://travis-ci.com/servo/rust-cssparser.svg)](https://travis-ci.com/servo/rust-cssparser) [Documentation](https://docs.rs/cssparser/) @@ -39,7 +39,7 @@ Parsing CSS involves a series of steps: * Component values can then be parsed into generic rules or declarations. 
The header and body of rules as well as the value of declarations
are still just lists of component values at this point.
- See [the `ast` module](src/ast.rs) for the data structures.
+ See [the `Token` enum](src/tokenizer.rs) for the data structure.
* The last step of a full CSS parser is parsing the remaining component values
--- a/vendor/cssparser/build.rs
+++ b/vendor/cssparser/build.rs
@@ -2,19 +2,15 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+extern crate autocfg;
#[macro_use]
extern crate quote;
#[macro_use]
extern crate syn;
extern crate proc_macro2;
-use std::env;
-use std::path::Path;
-
-
#[cfg(feature = "dummy_match_byte")]
mod codegen {
- use std::path::Path;
pub fn main() {}
}
@@ -37,9 +33,12 @@ mod codegen {
println!("cargo:rerun-if-changed={}", input.display());
// We have stack overflows on Servo's CI.
- let handle = Builder::new().stack_size(128 * 1024 * 1024).spawn(move || {
- match_byte::expand(&input, &output);
- }).unwrap();
+ let handle = Builder::new()
+ .stack_size(128 * 1024 * 1024)
+ .spawn(move || {
+ match_byte::expand(&input, &output);
+ })
+ .unwrap();
handle.join().unwrap();
}
@@ -51,5 +50,7 @@
fn main() {
println!("cargo:rustc-cfg=rustc_has_pr45225")
}
+ autocfg::new().emit_has_path("std::mem::MaybeUninit");
+
codegen::main();
}
--- a/vendor/cssparser/build/match_byte.rs
+++ b/vendor/cssparser/build/match_byte.rs
@@ -8,21 +8,31 @@
use std::io::{Read, Write};
use std::path::Path;
use syn;
use syn::fold::Fold;
+use syn::parse::{Parse, ParseStream, Result};
use proc_macro2::{Span, TokenStream};
-struct MatchByteParser {
-}
+struct MatchByteParser {}
pub fn expand(from: &Path, to: &Path) {
let mut source = String::new();
- File::open(from).unwrap().read_to_string(&mut source).unwrap();
+ File::open(from)
+ .unwrap()
+ .read_to_string(&mut source)
+ .unwrap();
let ast = syn::parse_file(&source).expect("Parsing rules.rs module");
let mut m = MatchByteParser {};
let ast = m.fold_file(ast);
- let code = ast.into_token_stream().to_string().replace("{ ", "{\n").replace(" }", "\n}");
- File::create(to).unwrap().write_all(code.as_bytes()).unwrap();
+ let code = ast
+ .into_token_stream()
+ .to_string()
+ .replace("{ ", "{\n")
+ .replace(" }", "\n}");
+ File::create(to)
+ .unwrap()
+ .write_all(code.as_bytes())
+ .unwrap();
}
struct MatchByte {
@@ -30,17 +40,23 @@ struct MatchByte {
arms: Vec<syn::Arm>,
}
-impl syn::synom::Synom for MatchByte {
- named!(parse -> Self, do_parse!(
- expr: syn!(syn::Expr) >>
- punct!(,) >>
- arms: many0!(syn!(syn::Arm)) >> (
- MatchByte {
- expr,
+impl Parse for MatchByte {
+ fn parse(input: ParseStream) -> Result<Self> {
+ Ok(MatchByte {
+ expr: {
+ let expr = input.parse()?;
+ input.parse::<Token![,]>()?;
+ expr
+ },
+ arms: {
+ let mut arms = Vec::new();
+ while !input.is_empty() {
+ arms.push(input.call(syn::Arm::parse)?);
+ }
arms
- }
- )
- ));
+ },
+ })
+ }
}
fn get_byte_from_expr_lit(expr: &Box<syn::Expr>) -> u8 {
@@ -48,15 +64,61 @@
{
if let syn::Lit::Byte(ref byte) = *lit {
byte.value()
- }
- else {
+ } else {
panic!("Found a pattern that wasn't a byte")
}
- },
+ }
_ => unreachable!(),
}
}
+/// Parse a pattern and fill the table accordingly
+fn parse_pat_to_table<'a>(pat: &'a syn::Pat, case_id: u8, wildcard: &mut Option<&'a syn::Ident>, table: &mut [u8; 256]) {
+ match pat {
+ &syn::Pat::Lit(syn::PatLit { ref expr, ..
}) => { + let value = get_byte_from_expr_lit(expr); + if table[value as usize] == 0 { + table[value as usize] = case_id; + } + } + &syn::Pat::Range(syn::PatRange { ref lo, ref hi, .. }) => { + let lo = get_byte_from_expr_lit(lo); + let hi = get_byte_from_expr_lit(hi); + for value in lo..hi { + if table[value as usize] == 0 { + table[value as usize] = case_id; + } + } + if table[hi as usize] == 0 { + table[hi as usize] = case_id; + } + } + &syn::Pat::Wild(_) => { + for byte in table.iter_mut() { + if *byte == 0 { + *byte = case_id; + } + } + } + &syn::Pat::Ident(syn::PatIdent { ref ident, .. }) => { + assert_eq!(*wildcard, None); + *wildcard = Some(ident); + for byte in table.iter_mut() { + if *byte == 0 { + *byte = case_id; + } + } + }, + &syn::Pat::Or(syn::PatOr { ref cases, .. }) => { + for case in cases { + parse_pat_to_table(case, case_id, wildcard, table); + } + } + _ => { + panic!("Unexpected pattern: {:?}. Buggy code ?", pat); + } + } +} /// Expand a TokenStream corresponding to the `match_byte` macro. /// @@ -83,48 +145,8 @@ fn expand_match_byte(body: &TokenStream) let case_id = i + 1; let index = case_id as isize; let name = syn::Ident::new(&format!("Case{}", case_id), Span::call_site()); + parse_pat_to_table(&arm.pat, case_id as u8, &mut wildcard, &mut table); - for pat in &arm.pats { - match pat { - &syn::Pat::Lit(syn::PatLit{ref expr}) => { - let value = get_byte_from_expr_lit(expr); - if table[value as usize] == 0 { - table[value as usize] = case_id as u8; - } - }, - &syn::Pat::Range(syn::PatRange { ref lo, ref hi, .. }) => { - let lo = get_byte_from_expr_lit(lo); - let hi = get_byte_from_expr_lit(hi); - for value in lo..hi { - if table[value as usize] == 0 { - table[value as usize] = case_id as u8; - } - } - if table[hi as usize] == 0 { - table[hi as usize] = case_id as u8; - } - }, - &syn::Pat::Wild(_) => { - for byte in table.iter_mut() { - if *byte == 0 { - *byte = case_id as u8; - } - } - }, - &syn::Pat::Ident(syn::PatIdent { ref ident, .. }) => { - assert_eq!(wildcard, None); - wildcard = Some(ident); - for byte in table.iter_mut() { - if *byte == 0 { - *byte = case_id as u8; - } - } - }, - _ => { - panic!("Unexpected pattern: {:?}. Buggy code ?", pat); - } - } - } cases.push(quote!(#name = #index)); let body = &arm.body; match_body.push(quote!(Case::#name => { #body })) @@ -152,11 +174,14 @@ fn expand_match_byte(body: &TokenStream) impl Fold for MatchByteParser { fn fold_stmt(&mut self, stmt: syn::Stmt) -> syn::Stmt { match stmt { - syn::Stmt::Item(syn::Item::Macro(syn::ItemMacro{ ref mac, .. })) => { + syn::Stmt::Item(syn::Item::Macro(syn::ItemMacro { ref mac, .. })) => { if mac.path == parse_quote!(match_byte) { - return syn::fold::fold_stmt(self, syn::Stmt::Expr(expand_match_byte(&mac.tts))) + return syn::fold::fold_stmt( + self, + syn::Stmt::Expr(expand_match_byte(&mac.tokens)), + ); } - }, + } _ => {} } @@ -165,11 +190,11 @@ impl Fold for MatchByteParser { fn fold_expr(&mut self, expr: syn::Expr) -> syn::Expr { match expr { - syn::Expr::Macro(syn::ExprMacro{ ref mac, .. }) => { + syn::Expr::Macro(syn::ExprMacro { ref mac, .. }) => { if mac.path == parse_quote!(match_byte) { - return syn::fold::fold_expr(self, expand_match_byte(&mac.tts)) + return syn::fold::fold_expr(self, expand_match_byte(&mac.tokens)); } - }, + } _ => {} } --- a/vendor/cssparser/src/color.rs +++ b/vendor/cssparser/src/color.rs @@ -2,16 +2,17 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/
-use std::fmt;
use std::f32::consts::PI;
+use std::fmt;
-use super::{Token, Parser, ToCss, ParseError, BasicParseError};
+use super::{BasicParseError, ParseError, Parser, ToCss, Token};
#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// A color with red, green, blue, and alpha components, in a byte each.
#[derive(Clone, Copy, PartialEq, Debug)]
+#[repr(C)]
pub struct RGBA {
/// The red component.
pub red: u8,
@@ -46,7 +47,12 @@
/// Same thing, but with `u8` values instead of floats in the 0 to 1 range.
#[inline]
pub fn new(red: u8, green: u8, blue: u8, alpha: u8) -> Self {
- RGBA { red: red, green: green, blue: blue, alpha: alpha }
+ RGBA {
+ red: red,
+ green: green,
+ blue: blue,
+ alpha: alpha,
+ }
}
/// Returns the red channel in a floating point number form, from 0 to 1.
@@ -77,7 +83,8 @@
#[cfg(feature = "serde")]
impl Serialize for RGBA {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
- where S: Serializer
+ where
+ S: Serializer,
{
(self.red, self.green, self.blue, self.alpha).serialize(serializer)
}
@@ -86,7 +93,8 @@
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for RGBA {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
- where D: Deserializer<'de>
+ where
+ D: Deserializer<'de>,
{
let (r, g, b, a) = Deserialize::deserialize(deserializer)?;
Ok(RGBA::new(r, g, b, a))
@@ -98,7 +106,8 @@
known_heap_size!(0, RGBA);
impl ToCss for RGBA {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result
- where W: fmt::Write,
+ where
+ W: fmt::Write,
{
let serialize_alpha = self.alpha != 255;
@@ -136,7 +145,10 @@ pub enum Color {
known_heap_size!(0, Color);
impl ToCss for Color {
- fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
+ fn to_css<W>(&self, dest: &mut W) -> fmt::Result
+ where
+ W: fmt::Write,
+ {
match *self {
Color::CurrentColor => dest.write_str("currentcolor"),
Color::RGBA(ref rgba) => rgba.to_css(dest),
@@ -209,7 +221,9 @@ pub trait ColorComponentParser<'i> {
let location = input.current_source_location();
Ok(match *input.next()? {
Token::Number { value, .. } => AngleOrNumber::Number { value },
- Token::Dimension { value: v, ref unit, .. } => {
+ Token::Dimension {
+ value: v, ref unit, ..
+ } => {
let degrees = match_ignore_ascii_case! { &*unit,
"deg" => v,
"grad" => v * 360. / 400.,
AngleOrNumber::Angle { degrees }
}
- ref t => return Err(location.new_unexpected_token_error(t.clone()))
+ ref t => return Err(location.new_unexpected_token_error(t.clone())),
})
}
@@ -251,7 +265,7 @@
Ok(match *input.next()? {
Token::Number { value, .. } => NumberOrPercentage::Number { value },
Token::Percentage { unit_value, .. } => NumberOrPercentage::Percentage { unit_value },
- ref t => return Err(location.new_unexpected_token_error(t.clone()))
+ ref t => return Err(location.new_unexpected_token_error(t.clone())),
})
}
}
@@ -278,21 +292,20 @@
match token {
Token::Hash(ref value) | Token::IDHash(ref value) => {
Color::parse_hash(value.as_bytes())
- },
+ }
Token::Ident(ref value) => parse_color_keyword(&*value),
Token::Function(ref name) => {
return input.parse_nested_block(|arguments| {
parse_color_function(component_parser, &*name, arguments)
})
}
- _ => Err(())
- }.map_err(|()| location.new_unexpected_token_error(token))
+ _ => Err(()),
+ }
+ .map_err(|()| location.new_unexpected_token_error(token))
}
/// Parse a value, per CSS Color Module Level 3.
- pub fn parse<'i, 't>(
- input: &mut Parser<'i, 't>,
- ) -> Result<Color, BasicParseError<'i>> {
+ pub fn parse<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Color, BasicParseError<'i>> {
let component_parser = DefaultComponentParser;
Self::parse_with(&component_parser, input).map_err(ParseError::basic)
}
@@ -305,25 +318,25 @@ impl Color {
from_hex(value[0])? * 16 + from_hex(value[1])?,
from_hex(value[2])? * 16 + from_hex(value[3])?,
from_hex(value[4])? * 16 + from_hex(value[5])?,
- from_hex(value[6])? * 16 + from_hex(value[7])?),
- ),
+ from_hex(value[6])? * 16 + from_hex(value[7])?,
+ )),
6 => Ok(rgb(
from_hex(value[0])? * 16 + from_hex(value[1])?,
from_hex(value[2])? * 16 + from_hex(value[3])?,
- from_hex(value[4])? * 16 + from_hex(value[5])?),
- ),
+ from_hex(value[4])? * 16 + from_hex(value[5])?,
+ )),
4 => Ok(rgba(
from_hex(value[0])? * 17,
from_hex(value[1])? * 17,
from_hex(value[2])? * 17,
- from_hex(value[3])? * 17),
- ),
+ from_hex(value[3])? * 17,
+ )),
3 => Ok(rgb(
from_hex(value[0])? * 17,
from_hex(value[1])? * 17,
- from_hex(value[2])? * 17),
- ),
- _ => Err(())
+ from_hex(value[2])? * 17,
+ )),
+ _ => Err(()),
}
}
}
@@ -338,7 +351,6 @@ fn rgba(red: u8, green: u8, blue: u8, al
Color::RGBA(RGBA::new(red, green, blue, alpha))
}
-
/// Return the named color with the given name.
///
/// Matching is case-insensitive in the ASCII range.
@@ -354,7 +366,7 @@ pub fn parse_color_keyword(ident: &str)
blue: $blue,
alpha: 255,
})
- }
+ };
}
ascii_case_insensitive_phf_map! {
keyword -> Color = {
@@ -515,14 +527,13 @@ pub fn parse_color_keyword(ident: &str)
keyword(ident).cloned().ok_or(())
}
-
#[inline]
fn from_hex(c: u8) -> Result<u8, ()> {
match c {
- b'0' ... b'9' => Ok(c - b'0'),
- b'a' ... b'f' => Ok(c - b'a' + 10),
- b'A' ... b'F' => Ok(c - b'A' + 10),
- _ => Err(())
+ b'0'..=b'9' => Ok(c - b'0'),
+ b'a'..=b'f' => Ok(c - b'a' + 10),
+ b'A'..=b'F' => Ok(c - b'A' + 10),
+ _ => Err(()),
}
}
@@ -552,7 +563,7 @@ fn clamp_floor_256_f32(val: f32) -> u8 {
fn parse_color_function<'i, 't, ComponentParser>(
component_parser: &ComponentParser,
name: &str,
- arguments: &mut Parser<'i, 't>
+ arguments: &mut Parser<'i, 't>,
) -> Result<Color, ParseError<'i, ComponentParser::Error>>
where
ComponentParser: ColorComponentParser<'i>,
{
@@ -569,7 +580,11 @@ where
} else {
arguments.expect_delim('/')?;
};
- clamp_unit_f32(component_parser.parse_number_or_percentage(arguments)?.unit_value())
+ clamp_unit_f32(
+ component_parser
+ .parse_number_or_percentage(arguments)?
+ .unit_value(),
+ )
} else {
255
};
@@ -578,11 +593,10 @@ where
Ok(rgba(red, green, blue, alpha))
}
-
#[inline]
fn parse_rgb_components_rgb<'i, 't, ComponentParser>(
component_parser: &ComponentParser,
- arguments: &mut Parser<'i, 't>
+ arguments: &mut Parser<'i, 't>,
) -> Result<(u8, u8, u8, bool), ParseError<'i, ComponentParser::Error>>
where
ComponentParser: ColorComponentParser<'i>,
{
// Either integers or percentages, but all the same type.
// https://drafts.csswg.org/css-color/#rgb-functions
let (red, is_number) = match component_parser.parse_number_or_percentage(arguments)?
{ - NumberOrPercentage::Number { value } => { - (clamp_floor_256_f32(value), true) - } - NumberOrPercentage::Percentage { unit_value } => { - (clamp_unit_f32(unit_value), false) - } + NumberOrPercentage::Number { value } => (clamp_floor_256_f32(value), true), + NumberOrPercentage::Percentage { unit_value } => (clamp_unit_f32(unit_value), false), }; - let uses_commas = arguments.try(|i| i.expect_comma()).is_ok(); + let uses_commas = arguments.try_parse(|i| i.expect_comma()).is_ok(); let green; let blue; @@ -622,7 +632,7 @@ where #[inline] fn parse_rgb_components_hsl<'i, 't, ComponentParser>( component_parser: &ComponentParser, - arguments: &mut Parser<'i, 't> + arguments: &mut Parser<'i, 't>, ) -> Result<(u8, u8, u8, bool), ParseError<'i, ComponentParser::Error>> where ComponentParser: ColorComponentParser<'i>, @@ -637,7 +647,7 @@ where // Saturation and lightness are clamped to 0% ... 100% // https://drafts.csswg.org/css-color/#the-hsl-notation - let uses_commas = arguments.try(|i| i.expect_comma()).is_ok(); + let uses_commas = arguments.try_parse(|i| i.expect_comma()).is_ok(); let saturation = component_parser.parse_percentage(arguments)?; let saturation = saturation.max(0.).min(1.); @@ -652,16 +662,28 @@ where // https://drafts.csswg.org/css-color/#hsl-color // except with h pre-multiplied by 3, to avoid some rounding errors. fn hue_to_rgb(m1: f32, m2: f32, mut h3: f32) -> f32 { - if h3 < 0. { h3 += 3. } - if h3 > 3. { h3 -= 3. } + if h3 < 0. { + h3 += 3. + } + if h3 > 3. { + h3 -= 3. + } - if h3 * 2. < 1. { m1 + (m2 - m1) * h3 * 2. } - else if h3 * 2. < 3. { m2 } - else if h3 < 2. { m1 + (m2 - m1) * (2. - h3) * 2. } - else { m1 } + if h3 * 2. < 1. { + m1 + (m2 - m1) * h3 * 2. + } else if h3 * 2. < 3. { + m2 + } else if h3 < 2. { + m1 + (m2 - m1) * (2. - h3) * 2. + } else { + m1 + } } - let m2 = if lightness <= 0.5 { lightness * (saturation + 1.) } - else { lightness + saturation - lightness * saturation }; + let m2 = if lightness <= 0.5 { + lightness * (saturation + 1.) + } else { + lightness + saturation - lightness * saturation + }; let m1 = lightness * 2. - m2; let hue_times_3 = hue * 3.; let red = clamp_unit_f32(hue_to_rgb(m1, m2, hue_times_3 + 1.)); --- a/vendor/cssparser/src/cow_rc_str.rs +++ b/vendor/cssparser/src/cow_rc_str.rs @@ -103,16 +103,12 @@ impl<'a> Clone for CowRcStr<'a> { fn clone(&self) -> Self { match self.unpack() { Err(ptr) => { - let rc = unsafe { - Rc::from_raw(ptr) - }; + let rc = unsafe { Rc::from_raw(ptr) }; let new_rc = rc.clone(); - mem::forget(rc); // Don’t actually take ownership of this strong reference + mem::forget(rc); // Don’t actually take ownership of this strong reference CowRcStr::from_rc(new_rc) } - Ok(_) => { - CowRcStr { ..*self } - } + Ok(_) => CowRcStr { ..*self }, } } } @@ -121,9 +117,7 @@ impl<'a> Drop for CowRcStr<'a> { #[inline] fn drop(&mut self) { if let Err(ptr) = self.unpack() { - mem::drop(unsafe { - Rc::from_raw(ptr) - }) + mem::drop(unsafe { Rc::from_raw(ptr) }) } } } @@ -133,9 +127,7 @@ impl<'a> Deref for CowRcStr<'a> { #[inline] fn deref(&self) -> &str { - self.unpack().unwrap_or_else(|ptr| unsafe { - &**ptr - }) + self.unpack().unwrap_or_else(|ptr| unsafe { &**ptr }) } } --- a/vendor/cssparser/src/from_bytes.rs +++ b/vendor/cssparser/src/from_bytes.rs @@ -17,7 +17,6 @@ pub trait EncodingSupport { fn is_utf16_be_or_le(encoding: &Self::Encoding) -> bool; } - /// Determine the character encoding of a CSS stylesheet. 
///
/// This is based on the presence of a BOM (Byte Order Mark), an `@charset` rule, and
@@ -30,31 +29,32 @@ pub trait EncodingSupport {
/// (https://drafts.csswg.org/css-syntax/#environment-encoding), if any.
///
/// Returns the encoding to use.
-pub fn stylesheet_encoding<E>(css: &[u8], protocol_encoding_label: Option<&[u8]>,
- environment_encoding: Option<E::Encoding>)
- -> E::Encoding
- where E: EncodingSupport {
+pub fn stylesheet_encoding<E>(
+ css: &[u8],
+ protocol_encoding_label: Option<&[u8]>,
+ environment_encoding: Option<E::Encoding>,
+) -> E::Encoding
+where
+ E: EncodingSupport,
+{
// https://drafts.csswg.org/css-syntax/#the-input-byte-stream
- match protocol_encoding_label {
- None => (),
- Some(label) => match E::from_label(label) {
- None => (),
- Some(protocol_encoding) => return protocol_encoding
- }
- }
+ if let Some(label) = protocol_encoding_label {
+ if let Some(protocol_encoding) = E::from_label(label) {
+ return protocol_encoding;
+ };
+ };
+
let prefix = b"@charset \"";
if css.starts_with(prefix) {
let rest = &css[prefix.len()..];
- match rest.iter().position(|&b| b == b'"') {
- None => (),
- Some(label_length) => if rest[label_length..].starts_with(b"\";") {
+ if let Some(label_length) = rest.iter().position(|&b| b == b'"') {
+ if rest[label_length..].starts_with(b"\";") {
let label = &rest[..label_length];
- match E::from_label(label) {
- None => (),
- Some(charset_encoding) => if E::is_utf16_be_or_le(&charset_encoding) {
- return E::utf8()
+ if let Some(charset_encoding) = E::from_label(label) {
+ if E::is_utf16_be_or_le(&charset_encoding) {
+ return E::utf8();
} else {
- return charset_encoding
+ return charset_encoding;
}
}
}
--- a/vendor/cssparser/src/lib.rs
+++ b/vendor/cssparser/src/lib.rs
@@ -4,7 +4,6 @@
#![crate_name = "cssparser"]
#![crate_type = "rlib"]
-
#![cfg_attr(feature = "bench", feature(test))]
#![deny(missing_docs)]
@@ -32,7 +31,7 @@ As a consequence, when calling another p
* Any `Err(())` return value must be propagated.
This happens by definition for tail calls,
- and can otherwise be done with the `try!` macro.
+ and can otherwise be done with the `?` operator.
* Or the call must be wrapped in a `Parser::try` call.
`try` takes a closure that takes a `Parser` and returns a `Result`,
calls it once,
@@ -46,7 +45,7 @@ Examples:
// 'none' | <image>
fn parse_background_image(context: &ParserContext, input: &mut Parser) -> Result<Option<Image>, ()> {
- if input.try(|input| input.expect_ident_matching("none")).is_ok() {
+ if input.try_parse(|input| input.expect_ident_matching("none")).is_ok() {
Ok(None)
} else {
Image::parse(context, input).map(Some) // tail call
@@ -58,50 +57,68 @@
// [ <length> | <percentage> ] [ <length> | <percentage> ]?
fn parse_border_spacing(_context: &ParserContext, input: &mut Parser) -> Result<(LengthOrPercentage, LengthOrPercentage), ()> { - let first = try!(LengthOrPercentage::parse); - let second = input.try(LengthOrPercentage::parse).unwrap_or(first); + let first = LengthOrPercentage::parse?; + let second = input.try_parse(LengthOrPercentage::parse).unwrap_or(first); (first, second) } ``` */ -#![recursion_limit="200"] // For color::parse_color_keyword +#![recursion_limit = "200"] // For color::parse_color_keyword extern crate dtoa_short; extern crate itoa; -#[macro_use] extern crate cssparser_macros; -#[macro_use] extern crate matches; -#[macro_use] extern crate procedural_masquerade; -#[doc(hidden)] pub extern crate phf as _internal__phf; -#[cfg(test)] extern crate encoding_rs; -#[cfg(test)] extern crate difference; -#[cfg(test)] extern crate rustc_serialize; -#[cfg(feature = "serde")] extern crate serde; -#[cfg(feature = "heapsize")] #[macro_use] extern crate heapsize; +#[macro_use] +extern crate cssparser_macros; +#[macro_use] +extern crate matches; +#[macro_use] +extern crate procedural_masquerade; +#[cfg(test)] +extern crate difference; +#[cfg(test)] +extern crate encoding_rs; +#[doc(hidden)] +pub extern crate phf as _internal__phf; +#[cfg(test)] +extern crate serde_json; +#[cfg(feature = "serde")] +extern crate serde; +#[cfg(feature = "heapsize")] +#[macro_use] +extern crate heapsize; extern crate smallvec; pub use cssparser_macros::*; -pub use tokenizer::{Token, SourcePosition, SourceLocation}; -pub use rules_and_declarations::{parse_important}; -pub use rules_and_declarations::{DeclarationParser, DeclarationListParser, parse_one_declaration}; -pub use rules_and_declarations::{RuleListParser, parse_one_rule}; -pub use rules_and_declarations::{AtRuleType, QualifiedRuleParser, AtRuleParser}; +pub use color::{ + parse_color_keyword, AngleOrNumber, Color, ColorComponentParser, NumberOrPercentage, RGBA, +}; +pub use cow_rc_str::CowRcStr; pub use from_bytes::{stylesheet_encoding, EncodingSupport}; -pub use color::{RGBA, Color, parse_color_keyword, AngleOrNumber, NumberOrPercentage, ColorComponentParser}; pub use nth::parse_nth; -pub use serializer::{ToCss, CssStringWriter, serialize_identifier, serialize_name, serialize_string, TokenSerializationType}; -pub use parser::{Parser, Delimiter, Delimiters, ParserState, ParserInput}; -pub use parser::{ParseError, ParseErrorKind, BasicParseError, BasicParseErrorKind}; +pub use parser::{BasicParseError, BasicParseErrorKind, ParseError, ParseErrorKind}; +pub use parser::{Delimiter, Delimiters, Parser, ParserInput, ParserState}; +pub use rules_and_declarations::parse_important; +pub use rules_and_declarations::{parse_one_declaration, DeclarationListParser, DeclarationParser}; +pub use rules_and_declarations::{parse_one_rule, RuleListParser}; +pub use rules_and_declarations::{AtRuleParser, AtRuleType, QualifiedRuleParser}; +pub use serializer::{ + serialize_identifier, serialize_name, serialize_string, CssStringWriter, ToCss, + TokenSerializationType, +}; +pub use tokenizer::{SourceLocation, SourcePosition, Token}; pub use unicode_range::UnicodeRange; -pub use cow_rc_str::CowRcStr; // For macros -#[doc(hidden)] pub use macros::_internal__to_lowercase; +#[doc(hidden)] +pub use macros::_internal__to_lowercase; // For macros when used in this crate. Unsure how $crate works with procedural-masquerade. 
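`Parser::try` is renamed throughout because `try` is a reserved keyword in the Rust 2018 edition; a deprecated alias (added further down in this patch) keeps old callers building. A hypothetical downstream sketch of the `try_parse` pattern shown in the doc examples above (the `Width` type and the `()` error type are invented for illustration):

```
use cssparser::{ParseError, Parser, Token};

enum Width {
    Auto,
    Px(f32),
}

fn parse_width<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Width, ParseError<'i, ()>> {
    // try_parse rewinds the parser when the closure fails, so the
    // fallback branch below starts from the same position.
    if input.try_parse(|i| i.expect_ident_matching("auto")).is_ok() {
        return Ok(Width::Auto);
    }
    match input.next()?.clone() {
        Token::Dimension { value, ref unit, .. } if unit.eq_ignore_ascii_case("px") => {
            Ok(Width::Px(value))
        }
        token => Err(input.new_unexpected_token_error(token)),
    }
}
```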
-mod cssparser { pub use _internal__phf; } +mod cssparser { + pub use _internal__phf; +} #[macro_use] mod macros; @@ -115,13 +132,15 @@ mod tokenizer; mod tokenizer { include!(concat!(env!("OUT_DIR"), "/tokenizer.rs")); } -mod parser; -mod from_bytes; mod color; +mod cow_rc_str; +mod from_bytes; mod nth; +mod parser; mod serializer; mod unicode_range; -mod cow_rc_str; -#[cfg(test)] mod tests; -#[cfg(test)] mod size_of_tests; +#[cfg(test)] +mod size_of_tests; +#[cfg(test)] +mod tests; --- a/vendor/cssparser/src/macros.rs +++ b/vendor/cssparser/src/macros.rs @@ -2,7 +2,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -/// See docs of the `procedural-masquerade` crate. +// See docs of the `procedural-masquerade` crate. define_invoke_proc_macro!(cssparser_internal__invoke_proc_macro); /// Expands to a `match` expression with string patterns, @@ -110,15 +110,39 @@ macro_rules! ascii_case_insensitive_phf_ #[doc(hidden)] macro_rules! cssparser_internal__to_lowercase { ($input: expr, $BUFFER_SIZE: expr => $output: ident) => { - // mem::uninitialized() is ok because `buffer` is only used in `_internal__to_lowercase`, + let mut buffer; + // Safety: `buffer` is only used in `_internal__to_lowercase`, // which initializes with `copy_from_slice` the part of the buffer it uses, // before it uses it. #[allow(unsafe_code)] - let mut buffer: [u8; $BUFFER_SIZE] = unsafe { - ::std::mem::uninitialized() - }; + let buffer = unsafe { cssparser_internal__uninit!(buffer, $BUFFER_SIZE) }; let input: &str = $input; - let $output = $crate::_internal__to_lowercase(&mut buffer, input); + let $output = $crate::_internal__to_lowercase(buffer, input); + }; +} + +#[cfg(has_std__mem__MaybeUninit)] +#[macro_export] +#[doc(hidden)] +macro_rules! cssparser_internal__uninit { + ($buffer: ident, $BUFFER_SIZE: expr) => { + { + $buffer = ::std::mem::MaybeUninit::<[u8; $BUFFER_SIZE]>::uninit(); + &mut *($buffer.as_mut_ptr()) + } + } +} + +// FIXME: remove this when we require Rust 1.36 +#[cfg(not(has_std__mem__MaybeUninit))] +#[macro_export] +#[doc(hidden)] +macro_rules! cssparser_internal__uninit { + ($buffer: ident, $BUFFER_SIZE: expr) => { + { + $buffer = ::std::mem::uninitialized::<[u8; $BUFFER_SIZE]>(); + &mut $buffer + } } } @@ -132,14 +156,12 @@ macro_rules! cssparser_internal__to_lowe #[allow(non_snake_case)] pub fn _internal__to_lowercase<'a>(buffer: &'a mut [u8], input: &'a str) -> Option<&'a str> { if let Some(buffer) = buffer.get_mut(..input.len()) { - if let Some(first_uppercase) = input.bytes().position(|byte| matches!(byte, b'A'...b'Z')) { + if let Some(first_uppercase) = input.bytes().position(|byte| matches!(byte, b'A'..=b'Z')) { buffer.copy_from_slice(input.as_bytes()); - ::std::ascii::AsciiExt::make_ascii_lowercase(&mut buffer[first_uppercase..]); + buffer[first_uppercase..].make_ascii_lowercase(); // `buffer` was initialized to a copy of `input` (which is &str so well-formed UTF-8) // then lowercased (which preserves UTF-8 well-formedness) - unsafe { - Some(::std::str::from_utf8_unchecked(buffer)) - } + unsafe { Some(::std::str::from_utf8_unchecked(buffer)) } } else { // Input is already lower-case Some(input) --- a/vendor/cssparser/src/nth.rs +++ b/vendor/cssparser/src/nth.rs @@ -2,10 +2,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ -#[allow(unused_imports)] use std::ascii::AsciiExt; - -use super::{Token, Parser, ParserInput, BasicParseError}; - +use super::{BasicParseError, Parser, ParserInput, Token}; /// Parse the *An+B* notation, as found in the `:nth-child()` selector. /// The input is typically the arguments of a function, @@ -14,14 +11,18 @@ use super::{Token, Parser, ParserInput, pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), BasicParseError<'i>> { // FIXME: remove .clone() when lifetimes are non-lexical. match input.next()?.clone() { - Token::Number { int_value: Some(b), .. } => { - Ok((0, b)) - } - Token::Dimension { int_value: Some(a), unit, .. } => { + Token::Number { + int_value: Some(b), .. + } => Ok((0, b)), + Token::Dimension { + int_value: Some(a), + unit, + .. + } => { match_ignore_ascii_case! { &unit, - "n" => Ok(try!(parse_b(input, a))), - "n-" => Ok(try!(parse_signless_b(input, a, -1))), + "n" => Ok(parse_b(input, a)?), + "n-" => Ok(parse_signless_b(input, a, -1)?), _ => match parse_n_dash_digits(&*unit) { Ok(b) => Ok((a, b)), Err(()) => Err(input.new_basic_unexpected_token_error(Token::Ident(unit.clone()))) @@ -32,10 +33,10 @@ pub fn parse_nth<'i, 't>(input: &mut Par match_ignore_ascii_case! { &value, "even" => Ok((2, 0)), "odd" => Ok((2, 1)), - "n" => Ok(try!(parse_b(input, 1))), - "-n" => Ok(try!(parse_b(input, -1))), - "n-" => Ok(try!(parse_signless_b(input, 1, -1))), - "-n-" => Ok(try!(parse_signless_b(input, -1, -1))), + "n" => Ok(parse_b(input, 1)?), + "-n" => Ok(parse_b(input, -1)?), + "n-" => Ok(parse_signless_b(input, 1, -1)?), + "-n-" => Ok(parse_signless_b(input, -1, -1)?), _ => { let (slice, a) = if value.starts_with("-") { (&value[1..], -1) @@ -67,13 +68,16 @@ pub fn parse_nth<'i, 't>(input: &mut Par } } - fn parse_b<'i, 't>(input: &mut Parser<'i, 't>, a: i32) -> Result<(i32, i32), BasicParseError<'i>> { let start = input.state(); match input.next() { Ok(&Token::Delim('+')) => parse_signless_b(input, a, 1), Ok(&Token::Delim('-')) => parse_signless_b(input, a, -1), - Ok(&Token::Number { has_sign: true, int_value: Some(b), .. }) => Ok((a, b)), + Ok(&Token::Number { + has_sign: true, + int_value: Some(b), + .. + }) => Ok((a, b)), _ => { input.reset(&start); Ok((a, 0)) @@ -81,21 +85,29 @@ fn parse_b<'i, 't>(input: &mut Parser<'i } } -fn parse_signless_b<'i, 't>(input: &mut Parser<'i, 't>, a: i32, b_sign: i32) -> Result<(i32, i32), BasicParseError<'i>> { +fn parse_signless_b<'i, 't>( + input: &mut Parser<'i, 't>, + a: i32, + b_sign: i32, +) -> Result<(i32, i32), BasicParseError<'i>> { // FIXME: remove .clone() when lifetimes are non-lexical. match input.next()?.clone() { - Token::Number { has_sign: false, int_value: Some(b), .. } => Ok((a, b_sign * b)), - token => Err(input.new_basic_unexpected_token_error(token)) + Token::Number { + has_sign: false, + int_value: Some(b), + .. 
+ } => Ok((a, b_sign * b)), + token => Err(input.new_basic_unexpected_token_error(token)), } } fn parse_n_dash_digits(string: &str) -> Result { let bytes = string.as_bytes(); if bytes.len() >= 3 - && bytes[..2].eq_ignore_ascii_case(b"n-") - && bytes[2..].iter().all(|&c| matches!(c, b'0'...b'9')) + && bytes[..2].eq_ignore_ascii_case(b"n-") + && bytes[2..].iter().all(|&c| matches!(c, b'0'..=b'9')) { - Ok(parse_number_saturate(&string[1..]).unwrap()) // Include the minus sign + Ok(parse_number_saturate(&string[1..]).unwrap()) // Include the minus sign } else { Err(()) } @@ -104,14 +116,17 @@ fn parse_n_dash_digits(string: &str) -> fn parse_number_saturate(string: &str) -> Result { let mut input = ParserInput::new(string); let mut parser = Parser::new(&mut input); - let int = if let Ok(&Token::Number {int_value: Some(int), ..}) - = parser.next_including_whitespace_and_comments() { + let int = if let Ok(&Token::Number { + int_value: Some(int), + .. + }) = parser.next_including_whitespace_and_comments() + { int } else { - return Err(()) + return Err(()); }; if !parser.is_exhausted() { - return Err(()) + return Err(()); } Ok(int) } --- a/vendor/cssparser/src/parser.rs +++ b/vendor/cssparser/src/parser.rs @@ -4,11 +4,9 @@ use cow_rc_str::CowRcStr; use smallvec::SmallVec; -use std::ops::Range; -#[allow(unused_imports)] use std::ascii::AsciiExt; use std::ops::BitOr; -use tokenizer::{Token, Tokenizer, SourcePosition, SourceLocation}; - +use std::ops::Range; +use tokenizer::{SourceLocation, SourcePosition, Token, Tokenizer}; /// A capture of the internal state of a `Parser` (including the position within the input), /// obtained from the `Parser::position` method. @@ -114,7 +112,10 @@ pub enum ParseErrorKind<'i, T: 'i> { impl<'i, T> ParseErrorKind<'i, T> { /// Like `std::convert::Into::into` - pub fn into(self) -> ParseErrorKind<'i, U> where T: Into { + pub fn into(self) -> ParseErrorKind<'i, U> + where + T: Into, + { match self { ParseErrorKind::Basic(basic) => ParseErrorKind::Basic(basic), ParseErrorKind::Custom(custom) => ParseErrorKind::Custom(custom.into()), @@ -144,7 +145,10 @@ impl<'i, T> ParseError<'i, T> { } /// Like `std::convert::Into::into` - pub fn into(self) -> ParseError<'i, U> where T: Into { + pub fn into(self) -> ParseError<'i, U> + where + T: Into, + { ParseError { kind: self.kind.into(), location: self.location, @@ -199,7 +203,6 @@ pub struct Parser<'i: 't, 't> { stop_before: Delimiters, } - #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub(crate) enum BlockType { Parenthesis, @@ -207,15 +210,13 @@ pub(crate) enum BlockType { CurlyBracket, } - impl BlockType { fn opening(token: &Token) -> Option { match *token { - Token::Function(_) | - Token::ParenthesisBlock => Some(BlockType::Parenthesis), + Token::Function(_) | Token::ParenthesisBlock => Some(BlockType::Parenthesis), Token::SquareBracketBlock => Some(BlockType::SquareBracket), Token::CurlyBracketBlock => Some(BlockType::CurlyBracket), - _ => None + _ => None, } } @@ -224,12 +225,11 @@ impl BlockType { Token::CloseParenthesis => Some(BlockType::Parenthesis), Token::CloseSquareBracket => Some(BlockType::SquareBracket), Token::CloseCurlyBracket => Some(BlockType::CurlyBracket), - _ => None + _ => None, } } } - /// A set of characters, to be used with the `Parser::parse_until*` methods. /// /// The union of two sets can be obtained with the `|` operator. 
Example: @@ -273,7 +273,9 @@ impl BitOr for Delimiters { #[inline] fn bitor(self, other: Delimiters) -> Delimiters { - Delimiters { bits: self.bits | other.bits } + Delimiters { + bits: self.bits | other.bits, + } } } @@ -338,16 +340,21 @@ impl<'i: 't, 't> Parser<'i, 't> { } /// Check whether the input is exhausted. That is, if `.next()` would return a token. - /// Return a `Result` so that the `try!` macro can be used: `try!(input.expect_exhausted())` + /// Return a `Result` so that the `?` operator can be used: `input.expect_exhausted()?` /// /// This ignores whitespace and comments. #[inline] pub fn expect_exhausted(&mut self) -> Result<(), BasicParseError<'i>> { let start = self.state(); let result = match self.next() { - Err(BasicParseError { kind: BasicParseErrorKind::EndOfInput, .. }) => Ok(()), + Err(BasicParseError { + kind: BasicParseErrorKind::EndOfInput, + .. + }) => Ok(()), Err(e) => unreachable!("Unexpected error encountered: {:?}", e), - Ok(t) => Err(start.source_location().new_basic_unexpected_token_error(t.clone())), + Ok(t) => Err(start + .source_location() + .new_basic_unexpected_token_error(t.clone())), }; self.reset(&start); result @@ -421,6 +428,16 @@ impl<'i: 't, 't> Parser<'i, 't> { self.new_error(BasicParseErrorKind::UnexpectedToken(token)) } + /// Create a new unexpected token or EOF ParseError at the current location + #[inline] + pub fn new_error_for_next_token(&mut self) -> ParseError<'i, E> { + let token = match self.next() { + Ok(token) => token.clone(), + Err(e) => return e.into(), + }; + self.new_error(BasicParseErrorKind::UnexpectedToken(token)) + } + /// Return the current internal state of the parser (including position within the input). /// /// This state can later be restored with the `Parser::reset` method. @@ -428,7 +445,7 @@ impl<'i: 't, 't> Parser<'i, 't> { pub fn state(&self) -> ParserState { ParserState { at_start_of: self.at_start_of, - .. self.input.tokenizer.state() + ..self.input.tokenizer.state() } } @@ -455,7 +472,7 @@ impl<'i: 't, 't> Parser<'i, 't> { pub(crate) fn next_byte(&self) -> Option { let byte = self.input.tokenizer.next_byte(); if self.stop_before.contains(Delimiters::from_byte(byte)) { - return None + return None; } byte } @@ -470,17 +487,28 @@ impl<'i: 't, 't> Parser<'i, 't> { self.at_start_of = state.at_start_of; } - /// Start looking for `var()` functions. (See the `.seen_var_functions()` method.) + /// Start looking for `var()` / `env()` functions. (See the + /// `.seen_var_or_env_functions()` method.) #[inline] - pub fn look_for_var_functions(&mut self) { - self.input.tokenizer.look_for_var_functions() + pub fn look_for_var_or_env_functions(&mut self) { + self.input.tokenizer.look_for_var_or_env_functions() } - /// Return whether a `var()` function has been seen by the tokenizer since - /// either `look_for_var_functions` was called, and stop looking. + /// Return whether a `var()` or `env()` function has been seen by the + /// tokenizer since either `look_for_var_or_env_functions` was called, and + /// stop looking. #[inline] - pub fn seen_var_functions(&mut self) -> bool { - self.input.tokenizer.seen_var_functions() + pub fn seen_var_or_env_functions(&mut self) -> bool { + self.input.tokenizer.seen_var_or_env_functions() + } + + /// The old name of `try_parse`, which requires raw identifiers in the Rust 2018 edition. + #[inline] + pub fn try(&mut self, thing: F) -> Result + where + F: FnOnce(&mut Parser<'i, 't>) -> Result, + { + self.try_parse(thing) } /// Execute the given closure, passing it the parser. 
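A small usage sketch of the save-and-restore behaviour being documented here (assuming the crate is available under its usual `cssparser` name):

```
use cssparser::{Parser, ParserInput, Token};

fn main() {
    let mut input = ParserInput::new("10px solid");
    let mut parser = Parser::new(&mut input);

    // The closure fails on the dimension token, so try_parse restores
    // the parser to the state captured before the call...
    assert!(parser
        .try_parse(|p| p.expect_ident_matching("solid"))
        .is_err());

    // ...and the dimension is still the next token to be consumed.
    assert!(matches!(parser.next(), Ok(&Token::Dimension { .. })));
}
```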
@@ -488,8 +516,10 @@ impl<'i: 't, 't> Parser<'i, 't> { /// the internal state of the parser (including position within the input) /// is restored to what it was before the call. #[inline] - pub fn try(&mut self, thing: F) -> Result - where F: FnOnce(&mut Parser<'i, 't>) -> Result { + pub fn try_parse(&mut self, thing: F) -> Result + where + F: FnOnce(&mut Parser<'i, 't>) -> Result, + { let start = self.state(); let result = thing(self); if result.is_err() { @@ -531,8 +561,8 @@ impl<'i: 't, 't> Parser<'i, 't> { loop { match self.next_including_whitespace_and_comments() { Err(e) => return Err(e), - Ok(&Token::Comment(_)) => {}, - _ => break + Ok(&Token::Comment(_)) => {} + _ => break, } } Ok(self.input.cached_token_ref()) @@ -544,39 +574,47 @@ impl<'i: 't, 't> Parser<'i, 't> { /// where comments are preserved. /// When parsing higher-level values, per the CSS Syntax specification, /// comments should always be ignored between tokens. - pub fn next_including_whitespace_and_comments(&mut self) -> Result<&Token<'i>, BasicParseError<'i>> { + pub fn next_including_whitespace_and_comments( + &mut self, + ) -> Result<&Token<'i>, BasicParseError<'i>> { if let Some(block_type) = self.at_start_of.take() { consume_until_end_of_block(block_type, &mut self.input.tokenizer); } let byte = self.input.tokenizer.next_byte(); if self.stop_before.contains(Delimiters::from_byte(byte)) { - return Err(self.new_basic_error(BasicParseErrorKind::EndOfInput)) + return Err(self.new_basic_error(BasicParseErrorKind::EndOfInput)); } let token_start_position = self.input.tokenizer.position(); - let token; - match self.input.cached_token { - Some(ref cached_token) - if cached_token.start_position == token_start_position => { - self.input.tokenizer.reset(&cached_token.end_state); - match cached_token.token { - Token::Function(ref name) => self.input.tokenizer.see_function(name), - _ => {} - } - token = &cached_token.token + let using_cached_token = self + .input + .cached_token + .as_ref() + .map_or(false, |cached_token| { + cached_token.start_position == token_start_position + }); + let token = if using_cached_token { + let cached_token = self.input.cached_token.as_ref().unwrap(); + self.input.tokenizer.reset(&cached_token.end_state); + match cached_token.token { + Token::Function(ref name) => self.input.tokenizer.see_function(name), + _ => {} } - _ => { - let new_token = self.input.tokenizer.next() - .map_err(|()| self.new_basic_error(BasicParseErrorKind::EndOfInput))?; - self.input.cached_token = Some(CachedToken { - token: new_token, - start_position: token_start_position, - end_state: self.input.tokenizer.state(), - }); - token = self.input.cached_token_ref() - } - } + &cached_token.token + } else { + let new_token = self + .input + .tokenizer + .next() + .map_err(|()| self.new_basic_error(BasicParseErrorKind::EndOfInput))?; + self.input.cached_token = Some(CachedToken { + token: new_token, + start_position: token_start_position, + end_state: self.input.tokenizer.state(), + }); + self.input.cached_token_ref() + }; if let Some(block_type) = BlockType::opening(token) { self.at_start_of = Some(block_type); @@ -590,7 +628,9 @@ impl<'i: 't, 't> Parser<'i, 't> { /// This can help tell e.g. 
`color: green;` from `color: green 4px;` #[inline] pub fn parse_entirely(&mut self, parse: F) -> Result> - where F: FnOnce(&mut Parser<'i, 't>) -> Result> { + where + F: FnOnce(&mut Parser<'i, 't>) -> Result>, + { let result = parse(self)?; self.expect_exhausted()?; Ok(result) @@ -607,15 +647,20 @@ impl<'i: 't, 't> Parser<'i, 't> { /// This method retuns `Err(())` the first time that a closure call does, /// or if a closure call leaves some input before the next comma or the end of the input. #[inline] - pub fn parse_comma_separated(&mut self, mut parse_one: F) -> Result, ParseError<'i, E>> - where F: for<'tt> FnMut(&mut Parser<'i, 'tt>) -> Result> { + pub fn parse_comma_separated( + &mut self, + mut parse_one: F, + ) -> Result, ParseError<'i, E>> + where + F: for<'tt> FnMut(&mut Parser<'i, 'tt>) -> Result>, + { // Vec grows from 0 to 4 by default on first push(). So allocate with // capacity 1, so in the somewhat common case of only one item we don't // way overallocate. Note that we always push at least one item if // parsing succeeds. let mut values = Vec::with_capacity(1); loop { - self.skip_whitespace(); // Unnecessary for correctness, but may help try() in parse_one rewind less. + self.skip_whitespace(); // Unnecessary for correctness, but may help try() in parse_one rewind less. values.push(self.parse_until_before(Delimiter::Comma, &mut parse_one)?); match self.next() { Err(_) => return Ok(values), @@ -637,8 +682,10 @@ impl<'i: 't, 't> Parser<'i, 't> { /// /// The result is overridden to `Err(())` if the closure leaves some input before that point. #[inline] - pub fn parse_nested_block(&mut self, parse: F) -> Result > - where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result> { + pub fn parse_nested_block(&mut self, parse: F) -> Result> + where + F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result>, + { parse_nested_block(self, parse) } @@ -651,9 +698,14 @@ impl<'i: 't, 't> Parser<'i, 't> { /// /// The result is overridden to `Err(())` if the closure leaves some input before that point. #[inline] - pub fn parse_until_before(&mut self, delimiters: Delimiters, parse: F) - -> Result > - where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result> { + pub fn parse_until_before( + &mut self, + delimiters: Delimiters, + parse: F, + ) -> Result> + where + F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result>, + { parse_until_before(self, delimiters, parse) } @@ -663,9 +715,14 @@ impl<'i: 't, 't> Parser<'i, 't> { /// (e.g. if these is only one in the given set) /// or if it was there at all (as opposed to reaching the end of the input). #[inline] - pub fn parse_until_after(&mut self, delimiters: Delimiters, parse: F) - -> Result > - where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result> { + pub fn parse_until_after( + &mut self, + delimiters: Delimiters, + parse: F, + ) -> Result> + where + F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result>, + { parse_until_after(self, delimiters, parse) } @@ -675,7 +732,7 @@ impl<'i: 't, 't> Parser<'i, 't> { let start_location = self.current_source_location(); match *self.next_including_whitespace()? { Token::WhiteSpace(value) => Ok(value), - ref t => Err(start_location.new_basic_unexpected_token_error(t.clone())) + ref t => Err(start_location.new_basic_unexpected_token_error(t.clone())), } } @@ -695,7 +752,10 @@ impl<'i: 't, 't> Parser<'i, 't> { /// Parse a whose unescaped value is an ASCII-insensitive match for the given value. 
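The `parse_comma_separated` helper whose signature is reformatted above is typically driven like this (a minimal sketch; the `()` custom-error type is an arbitrary choice):

```
use cssparser::{ParseError, Parser, ParserInput};

fn main() {
    let mut input = ParserInput::new(" 1, 2.5, 3 ");
    let mut parser = Parser::new(&mut input);

    // One closure call per comma-separated item; any failing item
    // fails the whole list.
    let numbers: Result<Vec<f32>, ParseError<'_, ()>> =
        parser.parse_comma_separated(|p| p.expect_number().map_err(Into::into));

    assert_eq!(numbers.unwrap(), vec![1.0, 2.5, 3.0]);
}
```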
#[inline] - pub fn expect_ident_matching(&mut self, expected_value: &str) -> Result<(), BasicParseError<'i>> { + pub fn expect_ident_matching( + &mut self, + expected_value: &str, + ) -> Result<(), BasicParseError<'i>> { expect! {self, Token::Ident(ref value) if value.eq_ignore_ascii_case(expected_value) => Ok(()), } @@ -732,8 +792,10 @@ impl<'i: 't, 't> Parser<'i, 't> { Token::UnquotedUrl(ref value) => return Ok(value.clone()), Token::Function(ref name) if name.eq_ignore_ascii_case("url") => {} } - self.parse_nested_block(|input| input.expect_string().map_err(Into::into).map(|s| s.clone())) - .map_err(ParseError::<()>::basic) + self.parse_nested_block(|input| { + input.expect_string().map_err(Into::into).map(|s| s.clone()) + }) + .map_err(ParseError::<()>::basic) } /// Parse either a or a , and return the unescaped value. @@ -745,8 +807,10 @@ impl<'i: 't, 't> Parser<'i, 't> { Token::QuotedString(ref value) => return Ok(value.clone()), Token::Function(ref name) if name.eq_ignore_ascii_case("url") => {} } - self.parse_nested_block(|input| input.expect_string().map_err(Into::into).map(|s| s.clone())) - .map_err(ParseError::<()>::basic) + self.parse_nested_block(|input| { + input.expect_string().map_err(Into::into).map(|s| s.clone()) + }) + .map_err(ParseError::<()>::basic) } /// Parse a and return the integer value. @@ -850,7 +914,10 @@ impl<'i: 't, 't> Parser<'i, 't> { /// /// If the result is `Ok`, you can then call the `Parser::parse_nested_block` method. #[inline] - pub fn expect_function_matching(&mut self, expected_name: &str) -> Result<(), BasicParseError<'i>> { + pub fn expect_function_matching( + &mut self, + expected_name: &str, + ) -> Result<(), BasicParseError<'i>> { expect! {self, Token::Function(ref name) if name.eq_ignore_ascii_case(expected_name) => Ok(()), } @@ -865,21 +932,22 @@ impl<'i: 't, 't> Parser<'i, 't> { let token; loop { match self.next_including_whitespace_and_comments() { - Ok(&Token::Function(_)) | - Ok(&Token::ParenthesisBlock) | - Ok(&Token::SquareBracketBlock) | - Ok(&Token::CurlyBracketBlock) => {} + Ok(&Token::Function(_)) + | Ok(&Token::ParenthesisBlock) + | Ok(&Token::SquareBracketBlock) + | Ok(&Token::CurlyBracketBlock) => {} Ok(t) => { if t.is_parse_error() { token = t.clone(); - break + break; } - continue + continue; } - Err(_) => return Ok(()) + Err(_) => return Ok(()), } - let result = self.parse_nested_block(|input| input.expect_no_error_token() - .map_err(|e| Into::into(e))); + let result = self.parse_nested_block(|input| { + input.expect_no_error_token().map_err(|e| Into::into(e)) + }); result.map_err(ParseError::<()>::basic)? } // FIXME: maybe these should be separate variants of BasicParseError instead? @@ -887,11 +955,14 @@ impl<'i: 't, 't> Parser<'i, 't> { } } -pub fn parse_until_before<'i: 't, 't, F, T, E>(parser: &mut Parser<'i, 't>, - delimiters: Delimiters, - parse: F) - -> Result > - where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result> { +pub fn parse_until_before<'i: 't, 't, F, T, E>( + parser: &mut Parser<'i, 't>, + delimiters: Delimiters, + parse: F, +) -> Result> +where + F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result>, +{ let delimiters = parser.stop_before | delimiters; let result; // Introduce a new scope to limit duration of nested_parser’s borrow @@ -909,27 +980,34 @@ pub fn parse_until_before<'i: 't, 't, F, // FIXME: have a special-purpose tokenizer method for this that does less work. 
loop { if delimiters.contains(Delimiters::from_byte(parser.input.tokenizer.next_byte())) { - break + break; } if let Ok(token) = parser.input.tokenizer.next() { if let Some(block_type) = BlockType::opening(&token) { consume_until_end_of_block(block_type, &mut parser.input.tokenizer); } } else { - break + break; } } result } -pub fn parse_until_after<'i: 't, 't, F, T, E>(parser: &mut Parser<'i, 't>, - delimiters: Delimiters, - parse: F) - -> Result > - where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result> { +pub fn parse_until_after<'i: 't, 't, F, T, E>( + parser: &mut Parser<'i, 't>, + delimiters: Delimiters, + parse: F, +) -> Result> +where + F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result>, +{ let result = parser.parse_until_before(delimiters, parse); let next_byte = parser.input.tokenizer.next_byte(); - if next_byte.is_some() && !parser.stop_before.contains(Delimiters::from_byte(next_byte)) { + if next_byte.is_some() + && !parser + .stop_before + .contains(Delimiters::from_byte(next_byte)) + { debug_assert!(delimiters.contains(Delimiters::from_byte(next_byte))); // We know this byte is ASCII. parser.input.tokenizer.advance(1); @@ -940,14 +1018,20 @@ pub fn parse_until_after<'i: 't, 't, F, result } -pub fn parse_nested_block<'i: 't, 't, F, T, E>(parser: &mut Parser<'i, 't>, parse: F) - -> Result > - where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result> { - let block_type = parser.at_start_of.take().expect("\ - A nested parser can only be created when a Function, \ - ParenthesisBlock, SquareBracketBlock, or CurlyBracketBlock \ - token was just consumed.\ - "); +pub fn parse_nested_block<'i: 't, 't, F, T, E>( + parser: &mut Parser<'i, 't>, + parse: F, +) -> Result> +where + F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result>, +{ + let block_type = parser.at_start_of.take().expect( + "\ + A nested parser can only be created when a Function, \ + ParenthesisBlock, SquareBracketBlock, or CurlyBracketBlock \ + token was just consumed.\ + ", + ); let closing_delimiter = match block_type { BlockType::CurlyBracket => ClosingDelimiter::CloseCurlyBracket, BlockType::SquareBracket => ClosingDelimiter::CloseSquareBracket, --- a/vendor/cssparser/src/rules_and_declarations.rs +++ b/vendor/cssparser/src/rules_and_declarations.rs @@ -4,15 +4,14 @@ // https://drafts.csswg.org/css-syntax/#parsing -use cow_rc_str::CowRcStr; -use parser::{parse_until_before, parse_until_after, parse_nested_block, ParserState}; -#[allow(unused_imports)] use std::ascii::AsciiExt; use super::{BasicParseError, BasicParseErrorKind, Delimiter}; use super::{ParseError, Parser, SourceLocation, Token}; +use cow_rc_str::CowRcStr; +use parser::{parse_nested_block, parse_until_after, parse_until_before, ParserState}; /// Parse `!important`. /// -/// Typical usage is `input.try(parse_important).is_ok()` +/// Typical usage is `input.try_parse(parse_important).is_ok()` /// at the end of a `DeclarationParser::parse_value` implementation. pub fn parse_important<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(), BasicParseError<'i>> { input.expect_delim('!')?; @@ -61,10 +60,13 @@ pub trait DeclarationParser<'i> { /// (In declaration lists, before the next semicolon or end of the current block.) /// /// If `!important` can be used in a given context, - /// `input.try(parse_important).is_ok()` should be used at the end + /// `input.try_parse(parse_important).is_ok()` should be used at the end /// of the implementation of this method and the result should be part of the return value. 
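A minimal sketch of a `parse_value` implementation following that advice (the `Decl` type, the `SketchParser` name and the `()` error type are placeholders invented here):

```
use cssparser::{parse_important, CowRcStr, DeclarationParser, ParseError, Parser};

// Hypothetical AST node for this sketch.
struct Decl<'i> {
    name: CowRcStr<'i>,
    value: CowRcStr<'i>,
    important: bool,
}

struct SketchParser;

impl<'i> DeclarationParser<'i> for SketchParser {
    type Declaration = Decl<'i>;
    type Error = ();

    fn parse_value<'t>(
        &mut self,
        name: CowRcStr<'i>,
        input: &mut Parser<'i, 't>,
    ) -> Result<Decl<'i>, ParseError<'i, ()>> {
        // For illustration, accept a single keyword as the whole value.
        let value = input.expect_ident()?.clone();
        // Check for !important last, as the documentation recommends,
        // and make it part of the return value.
        let important = input.try_parse(parse_important).is_ok();
        Ok(Decl { name, value, important })
    }
}
```

With `parse_one_declaration`, this accepts input such as `color: red !important`.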
- fn parse_value<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>) - -> Result>; + fn parse_value<'t>( + &mut self, + name: CowRcStr<'i>, + input: &mut Parser<'i, 't>, + ) -> Result>; } /// A trait to provide various parsing of at-rules. @@ -75,7 +77,7 @@ pub trait DeclarationParser<'i> { /// /// Default implementations that reject all at-rules are provided, /// so that `impl AtRuleParser<(), ()> for ... {}` can be used -/// for using `DeclarationListParser` to parse a declartions list with only qualified rules. +/// for using `DeclarationListParser` to parse a declarations list with only qualified rules. pub trait AtRuleParser<'i> { /// The intermediate representation of prelude of an at-rule without block; type PreludeNoBlock; @@ -106,9 +108,12 @@ pub trait AtRuleParser<'i> { /// The given `input` is a "delimited" parser /// that ends wherever the prelude should end. /// (Before the next semicolon, the next `{`, or the end of the current block.) - fn parse_prelude<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>) - -> Result, - ParseError<'i, Self::Error>> { + fn parse_prelude<'t>( + &mut self, + name: CowRcStr<'i>, + input: &mut Parser<'i, 't>, + ) -> Result, ParseError<'i, Self::Error>> + { let _ = name; let _ = input; Err(input.new_error(BasicParseErrorKind::AtRuleInvalid(name))) @@ -129,8 +134,10 @@ pub trait AtRuleParser<'i> { ) -> Self::AtRule { let _ = prelude; let _ = location; - panic!("The `AtRuleParser::rule_without_block` method must be overriden \ - if `AtRuleParser::parse_prelude` ever returns `AtRuleType::WithoutBlock`.") + panic!( + "The `AtRuleParser::rule_without_block` method must be overriden \ + if `AtRuleParser::parse_prelude` ever returns `AtRuleType::WithoutBlock`." + ) } /// Parse the content of a `{ /* ... */ }` block for the body of the at-rule. @@ -185,8 +192,10 @@ pub trait QualifiedRuleParser<'i> { /// /// The given `input` is a "delimited" parser /// that ends where the prelude should end (before the next `{`). - fn parse_prelude<'t>(&mut self, input: &mut Parser<'i, 't>) - -> Result> { + fn parse_prelude<'t>( + &mut self, + input: &mut Parser<'i, 't>, + ) -> Result> { let _ = input; Err(input.new_error(BasicParseErrorKind::QualifiedRuleInvalid)) } @@ -211,7 +220,6 @@ pub trait QualifiedRuleParser<'i> { } } - /// Provides an iterator for declaration list parsing. pub struct DeclarationListParser<'i: 't, 't: 'a, 'a, P> { /// The input given to `DeclarationListParser::new` @@ -221,10 +229,10 @@ pub struct DeclarationListParser<'i: 't, pub parser: P, } - impl<'i: 't, 't: 'a, 'a, I, P, E: 'i> DeclarationListParser<'i, 't, 'a, P> -where P: DeclarationParser<'i, Declaration = I, Error = E> + - AtRuleParser<'i, AtRule = I, Error = E> { +where + P: DeclarationParser<'i, Declaration = I, Error = E> + AtRuleParser<'i, AtRule = I, Error = E>, +{ /// Create a new `DeclarationListParser` for the given `input` and `parser`. /// /// Note that all CSS declaration lists can on principle contain at-rules. @@ -250,8 +258,9 @@ where P: DeclarationParser<'i, Declarati /// `DeclarationListParser` is an iterator that yields `Ok(_)` for a valid declaration or at-rule /// or `Err(())` for an invalid one. 
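A usage sketch for the iterator described here, continuing the hypothetical `SketchParser` above (`DeclarationListParser` also requires an `AtRuleParser` bound; the default methods, which reject every at-rule, are sufficient):

```
use cssparser::{AtRuleParser, DeclarationListParser, Parser, ParserInput};

// All parse_* methods keep their default, rejecting implementations.
impl<'i> AtRuleParser<'i> for SketchParser {
    type PreludeNoBlock = ();
    type PreludeBlock = ();
    type AtRule = Decl<'i>;
    type Error = ();
}

fn main() {
    let mut input = ParserInput::new("color: red !important; margin: auto");
    let mut parser = Parser::new(&mut input);
    for result in DeclarationListParser::new(&mut parser, SketchParser) {
        match result {
            Ok(decl) => println!("{}: {} ({})", &*decl.name, &*decl.value, decl.important),
            // The failing declaration's source slice comes back with the error.
            Err((_error, slice)) => println!("invalid: {:?}", slice),
        }
    }
}
```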
impl<'i: 't, 't: 'a, 'a, I, P, E: 'i> Iterator for DeclarationListParser<'i, 't, 'a, P> -where P: DeclarationParser<'i, Declaration = I, Error = E> + - AtRuleParser<'i, AtRule = I, Error = E> { +where + P: DeclarationParser<'i, Declaration = I, Error = E> + AtRuleParser<'i, AtRule = I, Error = E>, +{ type Item = Result, &'i str)>; fn next(&mut self) -> Option { @@ -259,7 +268,9 @@ where P: DeclarationParser<'i, Declarati let start = self.input.state(); // FIXME: remove intermediate variable when lifetimes are non-lexical let ident = match self.input.next_including_whitespace_and_comments() { - Ok(&Token::WhiteSpace(_)) | Ok(&Token::Comment(_)) | Ok(&Token::Semicolon) => continue, + Ok(&Token::WhiteSpace(_)) | Ok(&Token::Comment(_)) | Ok(&Token::Semicolon) => { + continue + } Ok(&Token::Ident(ref name)) => Ok(Ok(name.clone())), Ok(&Token::AtKeyword(ref name)) => Ok(Err(name.clone())), Ok(token) => Err(token.clone()), @@ -271,29 +282,34 @@ where P: DeclarationParser<'i, Declarati let result = { let parser = &mut self.parser; // FIXME: https://github.com/rust-lang/rust/issues/42508 - parse_until_after::<'i, 't, _, _, _>(self.input, Delimiter::Semicolon, |input| { - input.expect_colon()?; - parser.parse_value(name, input) - }) + parse_until_after::<'i, 't, _, _, _>( + self.input, + Delimiter::Semicolon, + |input| { + input.expect_colon()?; + parser.parse_value(name, input) + }, + ) }; - return Some(result.map_err(|e| (e, self.input.slice_from(start.position())))) + return Some(result.map_err(|e| (e, self.input.slice_from(start.position())))); } Ok(Err(name)) => { // At-keyword - return Some(parse_at_rule(&start, name, self.input, &mut self.parser)) + return Some(parse_at_rule(&start, name, self.input, &mut self.parser)); } Err(token) => { let result = self.input.parse_until_after(Delimiter::Semicolon, |_| { - Err(start.source_location().new_unexpected_token_error(token.clone())) + Err(start + .source_location() + .new_unexpected_token_error(token.clone())) }); - return Some(result.map_err(|e| (e, self.input.slice_from(start.position())))) + return Some(result.map_err(|e| (e, self.input.slice_from(start.position())))); } } } } } - /// Provides an iterator for rule list parsing. pub struct RuleListParser<'i: 't, 't: 'a, 'a, P> { /// The input given to `RuleListParser::new` @@ -306,10 +322,11 @@ pub struct RuleListParser<'i: 't, 't: 'a any_rule_so_far: bool, } - impl<'i: 't, 't: 'a, 'a, R, P, E: 'i> RuleListParser<'i, 't, 'a, P> -where P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E> + - AtRuleParser<'i, AtRule = R, Error = E> { +where + P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E> + + AtRuleParser<'i, AtRule = R, Error = E>, +{ /// Create a new `RuleListParser` for the given `input` at the top-level of a stylesheet /// and the given `parser`. /// @@ -345,12 +362,12 @@ where P: QualifiedRuleParser<'i, Qualifi } } - - /// `RuleListParser` is an iterator that yields `Ok(_)` for a rule or `Err(())` for an invalid one. 
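The rule-list side works the same way; a compact sketch with a parser that accepts every qualified rule and, through the default `AtRuleParser` methods, rejects every at-rule (all names here are invented for the example):

```
use cssparser::{
    AtRuleParser, ParseError, Parser, ParserInput, QualifiedRuleParser, RuleListParser,
    SourceLocation,
};

struct RuleCounter;

impl<'i> QualifiedRuleParser<'i> for RuleCounter {
    type Prelude = ();
    type QualifiedRule = ();
    type Error = ();

    fn parse_prelude<'t>(
        &mut self,
        input: &mut Parser<'i, 't>,
    ) -> Result<(), ParseError<'i, ()>> {
        // Accept any prelude by consuming it wholesale.
        while input.next().is_ok() {}
        Ok(())
    }

    fn parse_block<'t>(
        &mut self,
        _prelude: (),
        _location: SourceLocation,
        input: &mut Parser<'i, 't>,
    ) -> Result<(), ParseError<'i, ()>> {
        while input.next().is_ok() {}
        Ok(())
    }
}

impl<'i> AtRuleParser<'i> for RuleCounter {
    type PreludeNoBlock = ();
    type PreludeBlock = ();
    type AtRule = ();
    type Error = ();
}

fn main() {
    let css = "a { color: red } @media print { a { display: none } } p {}";
    let mut input = ParserInput::new(css);
    let mut parser = Parser::new(&mut input);
    let results: Vec<_> =
        RuleListParser::new_for_stylesheet(&mut parser, RuleCounter).collect();
    // Two qualified rules parse; the @media rule is rejected by default.
    assert_eq!(results.iter().filter(|r| r.is_ok()).count(), 2);
}
```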
impl<'i: 't, 't: 'a, 'a, R, P, E: 'i> Iterator for RuleListParser<'i, 't, 'a, P> -where P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E> + - AtRuleParser<'i, AtRule = R, Error = E> { +where + P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E> + + AtRuleParser<'i, AtRule = R, Error = E>, +{ type Item = Result, &'i str)>; fn next(&mut self) -> Option { @@ -375,7 +392,7 @@ where P: QualifiedRuleParser<'i, Qualifi } } Some(_) => at_keyword = None, - None => return None + None => return None, } if let Some(name) = at_keyword { @@ -383,40 +400,52 @@ where P: QualifiedRuleParser<'i, Qualifi self.any_rule_so_far = true; if first_stylesheet_rule && name.eq_ignore_ascii_case("charset") { let delimiters = Delimiter::Semicolon | Delimiter::CurlyBracketBlock; - let _: Result<(), ParseError<()>> = self.input.parse_until_after(delimiters, |_| Ok(())); + let _: Result<(), ParseError<()>> = + self.input.parse_until_after(delimiters, |_| Ok(())); } else { - return Some(parse_at_rule(&start, name.clone(), self.input, &mut self.parser)) + return Some(parse_at_rule( + &start, + name.clone(), + self.input, + &mut self.parser, + )); } } else { self.any_rule_so_far = true; let result = parse_qualified_rule(self.input, &mut self.parser); - return Some(result.map_err(|e| (e, self.input.slice_from(start.position())))) + return Some(result.map_err(|e| (e, self.input.slice_from(start.position())))); } } } } - /// Parse a single declaration, such as an `( /* ... */ )` parenthesis in an `@supports` prelude. -pub fn parse_one_declaration<'i, 't, P, E>(input: &mut Parser<'i, 't>, parser: &mut P) - -> Result<
<P as DeclarationParser<'i>
>::Declaration, - (ParseError<'i, E>, &'i str)> - where P: DeclarationParser<'i, Error = E> { +pub fn parse_one_declaration<'i, 't, P, E>( + input: &mut Parser<'i, 't>, + parser: &mut P, +) -> Result<
<P as DeclarationParser<'i>
>::Declaration, (ParseError<'i, E>, &'i str)> +where + P: DeclarationParser<'i, Error = E>, +{ let start_position = input.position(); - input.parse_entirely(|input| { - let name = input.expect_ident()?.clone(); - input.expect_colon()?; - parser.parse_value(name, input) - }) - .map_err(|e| (e, input.slice_from(start_position))) + input + .parse_entirely(|input| { + let name = input.expect_ident()?.clone(); + input.expect_colon()?; + parser.parse_value(name, input) + }) + .map_err(|e| (e, input.slice_from(start_position))) } - /// Parse a single rule, such as for CSSOM’s `CSSStyleSheet.insertRule`. -pub fn parse_one_rule<'i, 't, R, P, E>(input: &mut Parser<'i, 't>, parser: &mut P) - -> Result> -where P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E> + - AtRuleParser<'i, AtRule = R, Error = E> { +pub fn parse_one_rule<'i, 't, R, P, E>( + input: &mut Parser<'i, 't>, + parser: &mut P, +) -> Result> +where + P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E> + + AtRuleParser<'i, AtRule = R, Error = E>, +{ input.parse_entirely(|input| { input.skip_whitespace(); let start = input.state(); @@ -450,7 +479,7 @@ fn parse_at_rule<'i: 't, 't, P, E>( parser: &mut P, ) -> Result<
<P as AtRuleParser<'i>
>::AtRule, (ParseError<'i, E>, &'i str)> where - P: AtRuleParser<'i, Error = E> + P: AtRuleParser<'i, Error = E>, { let location = input.current_source_location(); let delimiters = Delimiter::Semicolon | Delimiter::CurlyBracketBlock; @@ -459,67 +488,64 @@ where parser.parse_prelude(name, input) }); match result { - Ok(AtRuleType::WithoutBlock(prelude)) => { - match input.next() { - Ok(&Token::Semicolon) | Err(_) => Ok(parser.rule_without_block(prelude, location)), - Ok(&Token::CurlyBracketBlock) => Err(( - input.new_unexpected_token_error(Token::CurlyBracketBlock), - input.slice_from(start.position()), - )), - Ok(_) => unreachable!() - } - } + Ok(AtRuleType::WithoutBlock(prelude)) => match input.next() { + Ok(&Token::Semicolon) | Err(_) => Ok(parser.rule_without_block(prelude, location)), + Ok(&Token::CurlyBracketBlock) => Err(( + input.new_unexpected_token_error(Token::CurlyBracketBlock), + input.slice_from(start.position()), + )), + Ok(_) => unreachable!(), + }, Ok(AtRuleType::WithBlock(prelude)) => { match input.next() { Ok(&Token::CurlyBracketBlock) => { // FIXME: https://github.com/rust-lang/rust/issues/42508 - parse_nested_block::<'i, 't, _, _, _>( - input, - move |input| parser.parse_block(prelude, location, input) - ).map_err(|e| (e, input.slice_from(start.position()))) + parse_nested_block::<'i, 't, _, _, _>(input, move |input| { + parser.parse_block(prelude, location, input) + }) + .map_err(|e| (e, input.slice_from(start.position()))) } Ok(&Token::Semicolon) => Err(( input.new_unexpected_token_error(Token::Semicolon), input.slice_from(start.position()), )), Err(e) => Err((e.into(), input.slice_from(start.position()))), - Ok(_) => unreachable!() + Ok(_) => unreachable!(), } } Err(error) => { let end_position = input.position(); match input.next() { - Ok(&Token::CurlyBracketBlock) | Ok(&Token::Semicolon) | Err(_) => {}, - _ => unreachable!() + Ok(&Token::CurlyBracketBlock) | Ok(&Token::Semicolon) | Err(_) => {} + _ => unreachable!(), }; Err((error, input.slice(start.position()..end_position))) } } } - fn parse_qualified_rule<'i, 't, P, E>( input: &mut Parser<'i, 't>, parser: &mut P, ) -> Result<
<P as QualifiedRuleParser<'i>
>::QualifiedRule, ParseError<'i, E>> where - P: QualifiedRuleParser<'i, Error = E> + P: QualifiedRuleParser<'i, Error = E>, { let location = input.current_source_location(); // FIXME: https://github.com/rust-lang/rust/issues/42508 - let prelude = parse_until_before::<'i, 't, _, _, _>(input, Delimiter::CurlyBracketBlock, |input| { - parser.parse_prelude(input) - }); + let prelude = + parse_until_before::<'i, 't, _, _, _>(input, Delimiter::CurlyBracketBlock, |input| { + parser.parse_prelude(input) + }); match *input.next()? { Token::CurlyBracketBlock => { // Do this here so that we consume the `{` even if the prelude is `Err`. let prelude = prelude?; // FIXME: https://github.com/rust-lang/rust/issues/42508 - parse_nested_block::<'i, 't, _, _, _>( - input, - move |input| parser.parse_block(prelude, location, input), - ) + parse_nested_block::<'i, 't, _, _, _>(input, move |input| { + parser.parse_block(prelude, location, input) + }) } - _ => unreachable!() + _ => unreachable!(), } } --- a/vendor/cssparser/src/serializer.rs +++ b/vendor/cssparser/src/serializer.rs @@ -4,18 +4,18 @@ use dtoa_short::{self, Notation}; use itoa; -#[allow(unused_imports)] use std::ascii::AsciiExt; use std::fmt::{self, Write}; use std::io; use std::str; use super::Token; - /// Trait for things the can serialize themselves in CSS syntax. pub trait ToCss { /// Serialize `self` in CSS syntax, writing to `dest`. - fn to_css(&self, dest: &mut W) -> fmt::Result where W: fmt::Write; + fn to_css(&self, dest: &mut W) -> fmt::Result + where + W: fmt::Write; /// Serialize `self` in CSS syntax and return a string. /// @@ -29,8 +29,10 @@ pub trait ToCss { } #[inline] -fn write_numeric(value: f32, int_value: Option, has_sign: bool, dest: &mut W) - -> fmt::Result where W: fmt::Write { +fn write_numeric(value: f32, int_value: Option, has_sign: bool, dest: &mut W) -> fmt::Result +where + W: fmt::Write, +{ // `value.value >= 0` is true for negative 0. if has_sign && value.is_sign_positive() { dest.write_str("+")?; @@ -39,7 +41,10 @@ fn write_numeric(value: f32, int_valu let notation = if value == 0.0 && value.is_sign_negative() { // Negative zero. Work around #20596. dest.write_str("-0")?; - Notation { decimal_point: false, scientific: false } + Notation { + decimal_point: false, + scientific: false, + } } else { dtoa_short::write(dest, value)? }; @@ -52,19 +57,21 @@ fn write_numeric(value: f32, int_valu Ok(()) } - impl<'a> ToCss for Token<'a> { - fn to_css(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { + fn to_css(&self, dest: &mut W) -> fmt::Result + where + W: fmt::Write, + { match *self { Token::Ident(ref value) => serialize_identifier(&**value, dest)?, Token::AtKeyword(ref value) => { dest.write_str("@")?; serialize_identifier(&**value, dest)?; - }, + } Token::Hash(ref value) => { dest.write_str("#")?; serialize_name(value, dest)?; - }, + } Token::IDHash(ref value) => { dest.write_str("#")?; serialize_identifier(&**value, dest)?; @@ -74,17 +81,28 @@ impl<'a> ToCss for Token<'a> { dest.write_str("url(")?; serialize_unquoted_url(&**value, dest)?; dest.write_str(")")?; - }, + } Token::Delim(value) => dest.write_char(value)?, - Token::Number { value, int_value, has_sign } => { - write_numeric(value, int_value, has_sign, dest)? 
- } - Token::Percentage { unit_value, int_value, has_sign } => { + Token::Number { + value, + int_value, + has_sign, + } => write_numeric(value, int_value, has_sign, dest)?, + Token::Percentage { + unit_value, + int_value, + has_sign, + } => { write_numeric(unit_value * 100., int_value, has_sign, dest)?; dest.write_str("%")?; - }, - Token::Dimension { value, int_value, has_sign, ref unit } => { + } + Token::Dimension { + value, + int_value, + has_sign, + ref unit, + } => { write_numeric(value, int_value, has_sign, dest)?; // Disambiguate with scientific notation. let unit = &**unit; @@ -94,7 +112,7 @@ impl<'a> ToCss for Token<'a> { } else { serialize_identifier(unit, dest)?; } - }, + } Token::WhiteSpace(content) => dest.write_str(content)?, Token::Comment(content) => { @@ -116,7 +134,7 @@ impl<'a> ToCss for Token<'a> { Token::Function(ref name) => { serialize_identifier(&**name, dest)?; dest.write_str("(")?; - }, + } Token::ParenthesisBlock => dest.write_str("(")?, Token::SquareBracketBlock => dest.write_str("[")?, Token::CurlyBracketBlock => dest.write_str("{")?, @@ -134,7 +152,7 @@ impl<'a> ToCss for Token<'a> { // and therefore does not have a closing quote. dest.write_char('"')?; CssStringWriter::new(dest).write_str(value)?; - }, + } Token::CloseParenthesis => dest.write_str(")")?, Token::CloseSquareBracket => dest.write_str("]")?, Token::CloseCurlyBracket => dest.write_str("}")?, @@ -143,7 +161,10 @@ impl<'a> ToCss for Token<'a> { } } -fn hex_escape(ascii_byte: u8, dest: &mut W) -> fmt::Result where W:fmt::Write { +fn hex_escape(ascii_byte: u8, dest: &mut W) -> fmt::Result +where + W: fmt::Write, +{ static HEX_DIGITS: &'static [u8; 16] = b"0123456789abcdef"; let b3; let b4; @@ -159,15 +180,21 @@ fn hex_escape(ascii_byte: u8, dest: & dest.write_str(unsafe { str::from_utf8_unchecked(&bytes) }) } -fn char_escape(ascii_byte: u8, dest: &mut W) -> fmt::Result where W:fmt::Write { +fn char_escape(ascii_byte: u8, dest: &mut W) -> fmt::Result +where + W: fmt::Write, +{ let bytes = [b'\\', ascii_byte]; dest.write_str(unsafe { str::from_utf8_unchecked(&bytes) }) } /// Write a CSS identifier, escaping characters as necessary. -pub fn serialize_identifier(mut value: &str, dest: &mut W) -> fmt::Result where W:fmt::Write { +pub fn serialize_identifier(mut value: &str, dest: &mut W) -> fmt::Result +where + W: fmt::Write, +{ if value.is_empty() { - return Ok(()) + return Ok(()); } if value.starts_with("--") { @@ -180,7 +207,7 @@ pub fn serialize_identifier(mut value dest.write_str("-")?; value = &value[1..]; } - if let digit @ b'0'...b'9' = value.as_bytes()[0] { + if let digit @ b'0'..=b'9' = value.as_bytes()[0] { hex_escape(digit, dest)?; value = &value[1..]; } @@ -192,11 +219,14 @@ pub fn serialize_identifier(mut value /// /// You should only use this when you know what you're doing, when in doubt, /// consider using `serialize_identifier`. 
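These escaping helpers are easy to exercise directly; a small sketch (the expected output simply applies the rules above: a leading digit is hex-escaped with a trailing space, and `%` gets a backslash escape):

```
use cssparser::serialize_identifier;

fn main() {
    let mut out = String::new();
    serialize_identifier("10%", &mut out).unwrap();
    // '1' (0x31) is hex-escaped because it starts the identifier,
    // and '%' is not an identifier code point, so it is escaped too.
    assert_eq!(out, "\\31 0\\%");
}
```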
-pub fn serialize_name(value: &str, dest: &mut W) -> fmt::Result where W:fmt::Write { +pub fn serialize_name(value: &str, dest: &mut W) -> fmt::Result +where + W: fmt::Write, +{ let mut chunk_start = 0; for (i, b) in value.bytes().enumerate() { let escaped = match b { - b'0'...b'9' | b'A'...b'Z' | b'a'...b'z' | b'_' | b'-' => continue, + b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z' | b'_' | b'-' => continue, _ if !b.is_ascii() => continue, b'\0' => Some("\u{FFFD}"), _ => None, @@ -214,14 +244,16 @@ pub fn serialize_name(value: &str, de dest.write_str(&value[chunk_start..]) } - -fn serialize_unquoted_url(value: &str, dest: &mut W) -> fmt::Result where W:fmt::Write { +fn serialize_unquoted_url(value: &str, dest: &mut W) -> fmt::Result +where + W: fmt::Write, +{ let mut chunk_start = 0; for (i, b) in value.bytes().enumerate() { let hex = match b { - b'\0' ... b' ' | b'\x7F' => true, + b'\0'..=b' ' | b'\x7F' => true, b'(' | b')' | b'"' | b'\'' | b'\\' => false, - _ => continue + _ => continue, }; dest.write_str(&value[chunk_start..i])?; if hex { @@ -234,16 +266,17 @@ fn serialize_unquoted_url(value: &str dest.write_str(&value[chunk_start..]) } - /// Write a double-quoted CSS string token, escaping content as necessary. -pub fn serialize_string(value: &str, dest: &mut W) -> fmt::Result where W: fmt::Write { +pub fn serialize_string(value: &str, dest: &mut W) -> fmt::Result +where + W: fmt::Write, +{ dest.write_str("\"")?; CssStringWriter::new(dest).write_str(value)?; dest.write_str("\"")?; Ok(()) } - /// A `fmt::Write` adapter that escapes text for writing as a double-quoted CSS string. /// Quotes are not included. /// @@ -251,12 +284,12 @@ pub fn serialize_string(value: &str, /// /// ```{rust,ignore} /// fn write_foo(foo: &Foo, dest: &mut W) -> fmt::Result where W: fmt::Write { -/// try!(dest.write_str("\"")); +/// dest.write_str("\"")?; /// { /// let mut string_dest = CssStringWriter::new(dest); /// // Write into string_dest... /// } -/// try!(dest.write_str("\"")); +/// dest.write_str("\"")?; /// Ok(()) /// } /// ``` @@ -264,14 +297,20 @@ pub struct CssStringWriter<'a, W: 'a> { inner: &'a mut W, } -impl<'a, W> CssStringWriter<'a, W> where W: fmt::Write { +impl<'a, W> CssStringWriter<'a, W> +where + W: fmt::Write, +{ /// Wrap a text writer to create a `CssStringWriter`. pub fn new(inner: &'a mut W) -> CssStringWriter<'a, W> { CssStringWriter { inner: inner } } } -impl<'a, W> fmt::Write for CssStringWriter<'a, W> where W: fmt::Write { +impl<'a, W> fmt::Write for CssStringWriter<'a, W> +where + W: fmt::Write, +{ fn write_str(&mut self, s: &str) -> fmt::Result { let mut chunk_start = 0; for (i, b) in s.bytes().enumerate() { @@ -279,7 +318,7 @@ impl<'a, W> fmt::Write for CssStringWrit b'"' => Some("\\\""), b'\\' => Some("\\\\"), b'\0' => Some("\u{FFFD}"), - b'\x01'...b'\x1F' | b'\x7F' => None, + b'\x01'..=b'\x1F' | b'\x7F' => None, _ => continue, }; self.inner.write_str(&s[chunk_start..i])?; @@ -293,11 +332,13 @@ impl<'a, W> fmt::Write for CssStringWrit } } - macro_rules! impl_tocss_for_int { ($T: ty) => { impl<'a> ToCss for $T { - fn to_css(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { + fn to_css(&self, dest: &mut W) -> fmt::Result + where + W: fmt::Write, + { struct AssumeUtf8(W); impl io::Write for AssumeUtf8 { @@ -305,7 +346,8 @@ macro_rules! impl_tocss_for_int { fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { // Safety: itoa only emits ASCII, which is also well-formed UTF-8. 
debug_assert!(buf.is_ascii()); - self.0.write_str(unsafe { str::from_utf8_unchecked(buf) }) + self.0 + .write_str(unsafe { str::from_utf8_unchecked(buf) }) .map_err(|_| io::ErrorKind::Other.into()) } @@ -323,11 +365,11 @@ macro_rules! impl_tocss_for_int { match itoa::write(AssumeUtf8(dest), *self) { Ok(_) => Ok(()), - Err(_) => Err(fmt::Error) + Err(_) => Err(fmt::Error), } } } - } + }; } impl_tocss_for_int!(i8); @@ -342,11 +384,14 @@ impl_tocss_for_int!(u64); macro_rules! impl_tocss_for_float { ($T: ty) => { impl<'a> ToCss for $T { - fn to_css(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { + fn to_css(&self, dest: &mut W) -> fmt::Result + where + W: fmt::Write, + { dtoa_short::write(dest, *self).map(|_| ()) } } - } + }; } impl_tocss_for_float!(f32); @@ -378,25 +423,43 @@ impl TokenSerializationType { /// so that they are not re-parsed as a single token. /// /// See https://drafts.csswg.org/css-syntax/#serialization + /// + /// See https://github.com/w3c/csswg-drafts/issues/4088 for the + /// `DelimPercent` bits. pub fn needs_separator_when_before(self, other: TokenSerializationType) -> bool { use self::TokenSerializationTypeVariants::*; match self.0 { - Ident => matches!(other.0, - Ident | Function | UrlOrBadUrl | DelimMinus | Number | Percentage | Dimension | - CDC | OpenParen), - AtKeywordOrHash | Dimension => matches!(other.0, - Ident | Function | UrlOrBadUrl | DelimMinus | Number | Percentage | Dimension | - CDC), - DelimHash | DelimMinus | Number => matches!(other.0, - Ident | Function | UrlOrBadUrl | DelimMinus | Number | Percentage | Dimension), - DelimAt => matches!(other.0, - Ident | Function | UrlOrBadUrl | DelimMinus), + Ident => matches!( + other.0, + Ident + | Function + | UrlOrBadUrl + | DelimMinus + | Number + | Percentage + | Dimension + | CDC + | OpenParen + ), + AtKeywordOrHash | Dimension => matches!( + other.0, + Ident | Function | UrlOrBadUrl | DelimMinus | Number | Percentage | Dimension | CDC + ), + DelimHash | DelimMinus => matches!( + other.0, + Ident | Function | UrlOrBadUrl | DelimMinus | Number | Percentage | Dimension + ), + Number => matches!( + other.0, + Ident | Function | UrlOrBadUrl | DelimMinus | Number | Percentage | DelimPercent | Dimension + ), + DelimAt => matches!(other.0, Ident | Function | UrlOrBadUrl | DelimMinus), DelimDotOrPlus => matches!(other.0, Number | Percentage | Dimension), DelimAssorted | DelimAsterisk => matches!(other.0, DelimEquals), DelimBar => matches!(other.0, DelimEquals | DelimBar | DashMatch), DelimSlash => matches!(other.0, DelimAsterisk | SubstringMatch), - Nothing | WhiteSpace | Percentage | UrlOrBadUrl | Function | CDC | OpenParen | - DashMatch | SubstringMatch | DelimQuestion | DelimEquals | Other => false, + Nothing | WhiteSpace | Percentage | UrlOrBadUrl | Function | CDC | OpenParen + | DashMatch | SubstringMatch | DelimQuestion | DelimEquals | DelimPercent | Other => false, } } } @@ -415,18 +478,19 @@ enum TokenSerializationTypeVariants { CDC, DashMatch, SubstringMatch, - OpenParen, // '(' - DelimHash, // '#' - DelimAt, // '@' - DelimDotOrPlus, // '.', '+' - DelimMinus, // '-' - DelimQuestion, // '?' - DelimAssorted, // '$', '^', '~' - DelimEquals, // '=' - DelimBar, // '|' - DelimSlash, // '/' - DelimAsterisk, // '*' - Other, // anything else + OpenParen, // '(' + DelimHash, // '#' + DelimAt, // '@' + DelimDotOrPlus, // '.', '+' + DelimMinus, // '-' + DelimQuestion, // '?' 
+ DelimAssorted, // '$', '^', '~' + DelimEquals, // '=' + DelimBar, // '|' + DelimSlash, // '/' + DelimAsterisk, // '*' + DelimPercent, // '%' + Other, // anything else } impl<'a> Token<'a> { @@ -446,6 +510,7 @@ impl<'a> Token<'a> { Token::Delim('-') => DelimMinus, Token::Delim('?') => DelimQuestion, Token::Delim('$') | Token::Delim('^') | Token::Delim('~') => DelimAssorted, + Token::Delim('%') => DelimPercent, Token::Delim('=') => DelimEquals, Token::Delim('|') => DelimBar, Token::Delim('/') => DelimSlash, @@ -460,12 +525,21 @@ impl<'a> Token<'a> { Token::CDC => CDC, Token::Function(_) => Function, Token::ParenthesisBlock => OpenParen, - Token::SquareBracketBlock | Token::CurlyBracketBlock | - Token::CloseParenthesis | Token::CloseSquareBracket | Token::CloseCurlyBracket | - Token::QuotedString(_) | Token::BadString(_) | - Token::Delim(_) | Token::Colon | Token::Semicolon | Token::Comma | Token::CDO | - Token::IncludeMatch | Token::PrefixMatch | Token::SuffixMatch - => Other, + Token::SquareBracketBlock + | Token::CurlyBracketBlock + | Token::CloseParenthesis + | Token::CloseSquareBracket + | Token::CloseCurlyBracket + | Token::QuotedString(_) + | Token::BadString(_) + | Token::Delim(_) + | Token::Colon + | Token::Semicolon + | Token::Comma + | Token::CDO + | Token::IncludeMatch + | Token::PrefixMatch + | Token::SuffixMatch => Other, }) } } --- a/vendor/cssparser/src/size_of_tests.rs +++ b/vendor/cssparser/src/size_of_tests.rs @@ -6,7 +6,6 @@ use cow_rc_str::CowRcStr; use std::borrow::Cow; use tokenizer::Token; -#[macro_export] macro_rules! size_of_test { ($testname: ident, $t: ty, $expected_size: expr) => { #[test] @@ -17,18 +16,24 @@ macro_rules! size_of_test { panic!( "Your changes have decreased the stack size of {} from {} to {}. \ Good work! Please update the expected size in {}.", - stringify!($t), old, new, file!() + stringify!($t), + old, + new, + file!() ) } else if new > old { panic!( "Your changes have increased the stack size of {} from {} to {}. \ Please consider choosing a design which avoids this increase. 
\ If you feel that the increase is necessary, update the size in {}.", - stringify!($t), old, new, file!() + stringify!($t), + old, + new, + file!() ) } } - } + }; } // Some of these assume 64-bit @@ -37,10 +42,18 @@ size_of_test!(std_cow_str, Cow<'static, size_of_test!(cow_rc_str, CowRcStr, 16); size_of_test!(tokenizer, ::tokenizer::Tokenizer, 72); -size_of_test!(parser_input, ::parser::ParserInput, if cfg!(rustc_has_pr45225) { 136 } else { 144 }); +size_of_test!( + parser_input, + ::parser::ParserInput, + if cfg!(rustc_has_pr45225) { 136 } else { 144 } +); size_of_test!(parser, ::parser::Parser, 16); size_of_test!(source_position, ::SourcePosition, 8); size_of_test!(parser_state, ::ParserState, 24); size_of_test!(basic_parse_error, ::BasicParseError, 48); -size_of_test!(parse_error_lower_bound, ::ParseError<()>, if cfg!(rustc_has_pr45225) { 48 } else { 56 }); +size_of_test!( + parse_error_lower_bound, + ::ParseError<()>, + if cfg!(rustc_has_pr45225) { 48 } else { 56 } +); --- a/vendor/cssparser/src/tests.rs +++ b/vendor/cssparser/src/tests.rs @@ -6,54 +6,54 @@ extern crate test; use encoding_rs; -use rustc_serialize::json::{self, Json, ToJson}; +use serde_json::{self, Value, json, Map}; #[cfg(feature = "bench")] use self::test::Bencher; -use super::{Parser, Delimiter, Token, SourceLocation, - ParseError, ParseErrorKind, BasicParseError, BasicParseErrorKind, - DeclarationListParser, DeclarationParser, RuleListParser, - AtRuleType, AtRuleParser, QualifiedRuleParser, ParserInput, - parse_one_declaration, parse_one_rule, parse_important, - stylesheet_encoding, EncodingSupport, - TokenSerializationType, CowRcStr, - Color, RGBA, parse_nth, UnicodeRange, ToCss}; +use super::{ + parse_important, parse_nth, parse_one_declaration, parse_one_rule, stylesheet_encoding, + AtRuleParser, AtRuleType, BasicParseError, BasicParseErrorKind, Color, CowRcStr, + DeclarationListParser, DeclarationParser, Delimiter, EncodingSupport, ParseError, + ParseErrorKind, Parser, ParserInput, QualifiedRuleParser, RuleListParser, SourceLocation, + ToCss, Token, TokenSerializationType, UnicodeRange, RGBA, +}; macro_rules! 
JArray { ($($e: expr,)*) => { JArray![ $( $e ),* ] }; - ($($e: expr),*) => { Json::Array(vec!( $( $e.to_json() ),* )) } + ($($e: expr),*) => { Value::Array(vec!( $( $e.to_json() ),* )) } } -fn almost_equals(a: &Json, b: &Json) -> bool { +fn almost_equals(a: &Value, b: &Value) -> bool { match (a, b) { - (&Json::I64(a), _) => almost_equals(&Json::F64(a as f64), b), - (&Json::U64(a), _) => almost_equals(&Json::F64(a as f64), b), - (_, &Json::I64(b)) => almost_equals(a, &Json::F64(b as f64)), - (_, &Json::U64(b)) => almost_equals(a, &Json::F64(b as f64)), - - (&Json::F64(a), &Json::F64(b)) => (a - b).abs() <= a.abs() * 1e-6, - - (&Json::Boolean(a), &Json::Boolean(b)) => a == b, - (&Json::String(ref a), &Json::String(ref b)) => a == b, - (&Json::Array(ref a), &Json::Array(ref b)) => { - a.len() == b.len() && - a.iter().zip(b.iter()).all(|(ref a, ref b)| almost_equals(*a, *b)) + (&Value::Number(ref a), &Value::Number(ref b)) => { + let a = a.as_f64().unwrap(); + let b = b.as_f64().unwrap(); + (a - b).abs() <= a.abs() * 1e-6 }, - (&Json::Object(_), &Json::Object(_)) => panic!("Not implemented"), - (&Json::Null, &Json::Null) => true, + + (&Value::Bool(a), &Value::Bool(b)) => a == b, + (&Value::String(ref a), &Value::String(ref b)) => a == b, + (&Value::Array(ref a), &Value::Array(ref b)) => { + a.len() == b.len() + && a.iter() + .zip(b.iter()) + .all(|(ref a, ref b)| almost_equals(*a, *b)) + } + (&Value::Object(_), &Value::Object(_)) => panic!("Not implemented"), + (&Value::Null, &Value::Null) => true, _ => false, } } -fn normalize(json: &mut Json) { +fn normalize(json: &mut Value) { match *json { - Json::Array(ref mut list) => { + Value::Array(ref mut list) => { for item in list.iter_mut() { normalize(item) } } - Json::String(ref mut s) => { + Value::String(ref mut s) => { if *s == "extra-input" || *s == "empty" { *s = "invalid".to_string() } @@ -62,22 +62,25 @@ fn normalize(json: &mut Json) { } } -fn assert_json_eq(results: json::Json, mut expected: json::Json, message: &str) { +fn assert_json_eq(results: Value, mut expected: Value, message: &str) { normalize(&mut expected); if !almost_equals(&results, &expected) { - println!("{}", ::difference::Changeset::new( - &results.pretty().to_string(), - &expected.pretty().to_string(), - "\n", - )); + println!( + "{}", + ::difference::Changeset::new( + &serde_json::to_string_pretty(&results).unwrap(), + &serde_json::to_string_pretty(&expected).unwrap(), + "\n", + ) + ); panic!("{}", message) } } -fn run_raw_json_tests ()>(json_data: &str, run: F) { - let items = match Json::from_str(json_data) { - Ok(Json::Array(items)) => items, - _ => panic!("Invalid JSON") +fn run_raw_json_tests ()>(json_data: &str, run: F) { + let items = match serde_json::from_str(json_data) { + Ok(Value::Array(items)) => items, + _ => panic!("Invalid JSON"), }; assert!(items.len() % 2 == 0); let mut input = None; @@ -87,83 +90,89 @@ fn run_raw_json_tests { let input = input.take().unwrap(); run(input, expected) - }, + } }; } } - -fn run_json_tests Json>(json_data: &str, parse: F) { - run_raw_json_tests(json_data, |input, expected| { - match input { - Json::String(input) => { - let mut parse_input = ParserInput::new(&input); - let result = parse(&mut Parser::new(&mut parse_input)); - assert_json_eq(result, expected, &input); - }, - _ => panic!("Unexpected JSON") +fn run_json_tests Value>(json_data: &str, parse: F) { + run_raw_json_tests(json_data, |input, expected| match input { + Value::String(input) => { + let mut parse_input = ParserInput::new(&input); + let result = parse(&mut 
Parser::new(&mut parse_input)); + assert_json_eq(result, expected, &input); } + _ => panic!("Unexpected JSON"), }); } - #[test] fn component_value_list() { - run_json_tests(include_str!("css-parsing-tests/component_value_list.json"), |input| { - Json::Array(component_values_to_json(input)) - }); + run_json_tests( + include_str!("css-parsing-tests/component_value_list.json"), + |input| Value::Array(component_values_to_json(input)), + ); } - #[test] fn one_component_value() { - run_json_tests(include_str!("css-parsing-tests/one_component_value.json"), |input| { - let result: Result> = input.parse_entirely(|input| { - Ok(one_component_value_to_json(input.next()?.clone(), input)) - }); - result.unwrap_or(JArray!["error", "invalid"]) - }); + run_json_tests( + include_str!("css-parsing-tests/one_component_value.json"), + |input| { + let result: Result> = input.parse_entirely(|input| { + Ok(one_component_value_to_json(input.next()?.clone(), input)) + }); + result.unwrap_or(JArray!["error", "invalid"]) + }, + ); } - #[test] fn declaration_list() { - run_json_tests(include_str!("css-parsing-tests/declaration_list.json"), |input| { - Json::Array(DeclarationListParser::new(input, JsonParser).map(|result| { - result.unwrap_or(JArray!["error", "invalid"]) - }).collect()) - }); + run_json_tests( + include_str!("css-parsing-tests/declaration_list.json"), + |input| { + Value::Array( + DeclarationListParser::new(input, JsonParser) + .map(|result| result.unwrap_or(JArray!["error", "invalid"])) + .collect(), + ) + }, + ); } - #[test] fn one_declaration() { - run_json_tests(include_str!("css-parsing-tests/one_declaration.json"), |input| { - parse_one_declaration(input, &mut JsonParser).unwrap_or(JArray!["error", "invalid"]) - }); + run_json_tests( + include_str!("css-parsing-tests/one_declaration.json"), + |input| { + parse_one_declaration(input, &mut JsonParser).unwrap_or(JArray!["error", "invalid"]) + }, + ); } - #[test] fn rule_list() { run_json_tests(include_str!("css-parsing-tests/rule_list.json"), |input| { - Json::Array(RuleListParser::new_for_nested_rule(input, JsonParser).map(|result| { - result.unwrap_or(JArray!["error", "invalid"]) - }).collect()) + Value::Array( + RuleListParser::new_for_nested_rule(input, JsonParser) + .map(|result| result.unwrap_or(JArray!["error", "invalid"])) + .collect(), + ) }); } - #[test] fn stylesheet() { run_json_tests(include_str!("css-parsing-tests/stylesheet.json"), |input| { - Json::Array(RuleListParser::new_for_stylesheet(input, JsonParser).map(|result| { - result.unwrap_or(JArray!["error", "invalid"]) - }).collect()) + Value::Array( + RuleListParser::new_for_stylesheet(input, JsonParser) + .map(|result| result.unwrap_or(JArray!["error", "invalid"])) + .collect(), + ) }); } - #[test] fn one_rule() { run_json_tests(include_str!("css-parsing-tests/one_rule.json"), |input| { @@ -171,7 +180,6 @@ fn one_rule() { }); } - #[test] fn stylesheet_from_bytes() { pub struct EncodingRs; @@ -184,8 +192,7 @@ fn stylesheet_from_bytes() { } fn is_utf16_be_or_le(encoding: &Self::Encoding) -> bool { - *encoding == encoding_rs::UTF_16LE || - *encoding == encoding_rs::UTF_16BE + *encoding == encoding_rs::UTF_16LE || *encoding == encoding_rs::UTF_16BE } fn from_label(ascii_label: &[u8]) -> Option { @@ -193,49 +200,56 @@ fn stylesheet_from_bytes() { } } + run_raw_json_tests( + include_str!("css-parsing-tests/stylesheet_bytes.json"), + |input, expected| { + let map = match input { + Value::Object(map) => map, + _ => panic!("Unexpected JSON"), + }; - 
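// Why the old Json::I64/U64/F64 arms in almost_equals (earlier in this file) collapse
// into a single arm after the rustc-serialize -> serde_json port: serde_json has one
// Value::Number variant, and Number::as_f64() yields a comparable f64 for integer and
// float payloads alike. A minimal standalone sketch, assuming only serde_json
// (illustrative, not part of the vendored diff):
use serde_json::{json, Value};

fn numbers_almost_equal(a: &Value, b: &Value) -> bool {
    match (a.as_f64(), b.as_f64()) {
        (Some(a), Some(b)) => (a - b).abs() <= a.abs() * 1e-6,
        _ => false,
    }
}

// numbers_almost_equal(&json!(5), &json!(5.0)) == true, which is what the CSS test
// fixtures need after the port.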
run_raw_json_tests(include_str!("css-parsing-tests/stylesheet_bytes.json"), - |input, expected| { - let map = match input { - Json::Object(map) => map, - _ => panic!("Unexpected JSON") - }; - - let result = { - let css = get_string(&map, "css_bytes").unwrap().chars().map(|c| { - assert!(c as u32 <= 0xFF); - c as u8 - }).collect::>(); - let protocol_encoding_label = get_string(&map, "protocol_encoding") - .map(|s| s.as_bytes()); - let environment_encoding = get_string(&map, "environment_encoding") - .map(|s| s.as_bytes()) - .and_then(EncodingRs::from_label); - - let encoding = stylesheet_encoding::( - &css, protocol_encoding_label, environment_encoding); - let (css_unicode, used_encoding, _) = encoding.decode(&css); - let mut input = ParserInput::new(&css_unicode); - let input = &mut Parser::new(&mut input); - let rules = RuleListParser::new_for_stylesheet(input, JsonParser) - .map(|result| result.unwrap_or(JArray!["error", "invalid"])) - .collect::>(); - JArray![rules, used_encoding.name().to_lowercase()] - }; - assert_json_eq(result, expected, &Json::Object(map).to_string()); - }); + let result = { + let css = get_string(&map, "css_bytes") + .unwrap() + .chars() + .map(|c| { + assert!(c as u32 <= 0xFF); + c as u8 + }) + .collect::>(); + let protocol_encoding_label = + get_string(&map, "protocol_encoding").map(|s| s.as_bytes()); + let environment_encoding = get_string(&map, "environment_encoding") + .map(|s| s.as_bytes()) + .and_then(EncodingRs::from_label); + + let encoding = stylesheet_encoding::( + &css, + protocol_encoding_label, + environment_encoding, + ); + let (css_unicode, used_encoding, _) = encoding.decode(&css); + let mut input = ParserInput::new(&css_unicode); + let input = &mut Parser::new(&mut input); + let rules = RuleListParser::new_for_stylesheet(input, JsonParser) + .map(|result| result.unwrap_or(JArray!["error", "invalid"])) + .collect::>(); + JArray![rules, used_encoding.name().to_lowercase()] + }; + assert_json_eq(result, expected, &Value::Object(map).to_string()); + }, + ); - fn get_string<'a>(map: &'a json::Object, key: &str) -> Option<&'a str> { + fn get_string<'a>(map: &'a Map, key: &str) -> Option<&'a str> { match map.get(key) { - Some(&Json::String(ref s)) => Some(s), - Some(&Json::Null) => None, + Some(&Value::String(ref s)) => Some(s), + Some(&Value::Null) => None, None => None, _ => panic!("Unexpected JSON"), } } } - #[test] fn expect_no_error_token() { let mut input = ParserInput::new("foo 4px ( / { !bar }"); @@ -256,16 +270,17 @@ fn expect_no_error_token() { assert!(Parser::new(&mut input).expect_no_error_token().is_err()); } - /// https://github.com/servo/rust-cssparser/issues/71 #[test] fn outer_block_end_consumed() { let mut input = ParserInput::new("(calc(true))"); let mut input = Parser::new(&mut input); assert!(input.expect_parenthesis_block().is_ok()); - assert!(input.parse_nested_block(|input| { - input.expect_function_matching("calc").map_err(Into::>::into) - }).is_ok()); + assert!(input + .parse_nested_block(|input| input + .expect_function_matching("calc") + .map_err(Into::>::into)) + .is_ok()); println!("{:?}", input.position()); assert!(input.next().is_err()); } @@ -275,7 +290,7 @@ fn outer_block_end_consumed() { fn bad_url_slice_out_of_bounds() { let mut input = ParserInput::new("url(\u{1}\\"); let mut parser = Parser::new(&mut input); - let result = parser.next_including_whitespace_and_comments(); // This used to panic + let result = parser.next_including_whitespace_and_comments(); // This used to panic assert_eq!(result, 
Ok(&Token::BadUrl("\u{1}\\".into()))); } @@ -284,27 +299,33 @@ fn bad_url_slice_out_of_bounds() { fn bad_url_slice_not_at_char_boundary() { let mut input = ParserInput::new("url(9\n۰"); let mut parser = Parser::new(&mut input); - let result = parser.next_including_whitespace_and_comments(); // This used to panic + let result = parser.next_including_whitespace_and_comments(); // This used to panic assert_eq!(result, Ok(&Token::BadUrl("9\n۰".into()))); } #[test] fn unquoted_url_escaping() { - let token = Token::UnquotedUrl("\ - \x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\ - \x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f \ - !\"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]\ - ^_`abcdefghijklmnopqrstuvwxyz{|}~\x7fé\ - ".into()); + let token = Token::UnquotedUrl( + "\ + \x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\ + \x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f \ + !\"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]\ + ^_`abcdefghijklmnopqrstuvwxyz{|}~\x7fé\ + " + .into(), + ); let serialized = token.to_css_string(); - assert_eq!(serialized, "\ - url(\ - \\1 \\2 \\3 \\4 \\5 \\6 \\7 \\8 \\9 \\a \\b \\c \\d \\e \\f \\10 \ - \\11 \\12 \\13 \\14 \\15 \\16 \\17 \\18 \\19 \\1a \\1b \\1c \\1d \\1e \\1f \\20 \ - !\\\"#$%&\\'\\(\\)*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]\ - ^_`abcdefghijklmnopqrstuvwxyz{|}~\\7f é\ - )\ - "); + assert_eq!( + serialized, + "\ + url(\ + \\1 \\2 \\3 \\4 \\5 \\6 \\7 \\8 \\9 \\a \\b \\c \\d \\e \\f \\10 \ + \\11 \\12 \\13 \\14 \\15 \\16 \\17 \\18 \\19 \\1a \\1b \\1c \\1d \\1e \\1f \\20 \ + !\\\"#$%&\\'\\(\\)*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]\ + ^_`abcdefghijklmnopqrstuvwxyz{|}~\\7f é\ + )\ + " + ); let mut input = ParserInput::new(&serialized); assert_eq!(Parser::new(&mut input).next(), Ok(&token)); } @@ -332,43 +353,48 @@ fn test_expect_url() { assert!(parse(&mut input).is_err()); } - -fn run_color_tests) -> Json>(json_data: &str, to_json: F) { +fn run_color_tests) -> Value>(json_data: &str, to_json: F) { run_json_tests(json_data, |input| { - let result: Result<_, ParseError<()>> = input.parse_entirely(|i| { - Color::parse(i).map_err(Into::into) - }); + let result: Result<_, ParseError<()>> = + input.parse_entirely(|i| Color::parse(i).map_err(Into::into)); to_json(result.map_err(|_| ())) }); } - #[test] fn color3() { - run_color_tests(include_str!("css-parsing-tests/color3.json"), |c| c.ok().to_json()) + run_color_tests(include_str!("css-parsing-tests/color3.json"), |c| { + c.ok().map(|v| v.to_json()).unwrap_or(Value::Null) + }) } - #[test] fn color3_hsl() { - run_color_tests(include_str!("css-parsing-tests/color3_hsl.json"), |c| c.ok().to_json()) + run_color_tests(include_str!("css-parsing-tests/color3_hsl.json"), |c| { + c.ok().map(|v| v.to_json()).unwrap_or(Value::Null) + }) } - /// color3_keywords.json is different: R, G and B are in 0..255 rather than 0..1 #[test] fn color3_keywords() { - run_color_tests(include_str!("css-parsing-tests/color3_keywords.json"), |c| c.ok().to_json()) + run_color_tests( + include_str!("css-parsing-tests/color3_keywords.json"), + |c| c.ok().map(|v| v.to_json()).unwrap_or(Value::Null), + ) } - #[test] fn nth() { run_json_tests(include_str!("css-parsing-tests/An+B.json"), |input| { - input.parse_entirely(|i| { - let result: Result<_, ParseError<()>> = parse_nth(i).map_err(Into::into); - result - }).ok().to_json() + input + .parse_entirely(|i| { + let result: Result<_, ParseError<()>> = parse_nth(i).map_err(Into::into); + 
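// The round trip the color tests above exercise, reduced to a helper; a sketch
// assuming the cssparser API visible in this patch (ParserInput, Parser,
// Color::parse, u8 RGBA channels), illustrative rather than part of the diff:
use cssparser::{Color, Parser, ParserInput};

fn parse_rgba(css: &str) -> Option<(u8, u8, u8, u8)> {
    let mut input = ParserInput::new(css);
    let mut parser = Parser::new(&mut input);
    match Color::parse(&mut parser) {
        Ok(Color::RGBA(c)) => Some((c.red, c.green, c.blue, c.alpha)),
        _ => None, // currentcolor, or a parse error
    }
}

// parse_rgba("rgb(255, 0, 0)") == Some((255, 0, 0, 255))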
result + }) + .ok() + .map(|(v0, v1)| json!([v0, v1])) + .unwrap_or(Value::Null) }); } @@ -384,11 +410,20 @@ fn unicode_range() { Ok(None) } }); - result.unwrap().to_json() + result.unwrap() + .iter() + .map(|v| + if let Some((v0, v1)) = v{ + json!([v0, v1]) + } else { + Value::Null + } + ) + .collect::>() + .to_json() }); } - #[test] fn serializer_not_preserving_comments() { serializer(false) @@ -400,44 +435,59 @@ fn serializer_preserving_comments() { } fn serializer(preserve_comments: bool) { - run_json_tests(include_str!("css-parsing-tests/component_value_list.json"), |input| { - fn write_to(mut previous_token: TokenSerializationType, - input: &mut Parser, - string: &mut String, - preserve_comments: bool) { - while let Ok(token) = if preserve_comments { - input.next_including_whitespace_and_comments().map(|t| t.clone()) - } else { - input.next_including_whitespace().map(|t| t.clone()) - } { - let token_type = token.serialization_type(); - if !preserve_comments && previous_token.needs_separator_when_before(token_type) { - string.push_str("/**/") - } - previous_token = token_type; - token.to_css(string).unwrap(); - let closing_token = match token { - Token::Function(_) | Token::ParenthesisBlock => Some(Token::CloseParenthesis), - Token::SquareBracketBlock => Some(Token::CloseSquareBracket), - Token::CurlyBracketBlock => Some(Token::CloseCurlyBracket), - _ => None - }; - if let Some(closing_token) = closing_token { - let result: Result<_, ParseError<()>> = input.parse_nested_block(|input| { - write_to(previous_token, input, string, preserve_comments); - Ok(()) - }); - result.unwrap(); - closing_token.to_css(string).unwrap(); + run_json_tests( + include_str!("css-parsing-tests/component_value_list.json"), + |input| { + fn write_to( + mut previous_token: TokenSerializationType, + input: &mut Parser, + string: &mut String, + preserve_comments: bool, + ) { + while let Ok(token) = if preserve_comments { + input + .next_including_whitespace_and_comments() + .map(|t| t.clone()) + } else { + input.next_including_whitespace().map(|t| t.clone()) + } { + let token_type = token.serialization_type(); + if !preserve_comments && previous_token.needs_separator_when_before(token_type) + { + string.push_str("/**/") + } + previous_token = token_type; + token.to_css(string).unwrap(); + let closing_token = match token { + Token::Function(_) | Token::ParenthesisBlock => { + Some(Token::CloseParenthesis) + } + Token::SquareBracketBlock => Some(Token::CloseSquareBracket), + Token::CurlyBracketBlock => Some(Token::CloseCurlyBracket), + _ => None, + }; + if let Some(closing_token) = closing_token { + let result: Result<_, ParseError<()>> = input.parse_nested_block(|input| { + write_to(previous_token, input, string, preserve_comments); + Ok(()) + }); + result.unwrap(); + closing_token.to_css(string).unwrap(); + } } } - } - let mut serialized = String::new(); - write_to(TokenSerializationType::nothing(), input, &mut serialized, preserve_comments); - let mut input = ParserInput::new(&serialized); - let parser = &mut Parser::new(&mut input); - Json::Array(component_values_to_json(parser)) - }); + let mut serialized = String::new(); + write_to( + TokenSerializationType::nothing(), + input, + &mut serialized, + preserve_comments, + ); + let mut input = ParserInput::new(&serialized); + let parser = &mut Parser::new(&mut input); + Value::Array(component_values_to_json(parser)) + }, + ); } #[test] @@ -497,36 +547,90 @@ fn line_numbers() { "b\"" )); let mut input = Parser::new(&mut input); - 
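// The invariant behind the serializer test reflowed above: re-serializing a token
// stream must stay lossless, inserting "/**/" wherever two adjacent tokens would
// otherwise fuse. A simplified sketch (no nested-block recursion, unlike write_to
// above), assuming the API shown in this patch; illustrative only:
use cssparser::{Parser, ParserInput, ToCss, TokenSerializationType};

fn reserialize_flat(css: &str) -> String {
    let mut input = ParserInput::new(css);
    let mut parser = Parser::new(&mut input);
    let mut out = String::new();
    let mut previous = TokenSerializationType::nothing();
    while let Ok(token) = parser.next_including_whitespace().map(|t| t.clone()) {
        let current = token.serialization_type();
        if previous.needs_separator_when_before(current) {
            out.push_str("/**/"); // empty comment keeps the two tokens distinct
        }
        previous = current;
        token.to_css(&mut out).unwrap();
    }
    out
}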
assert_eq!(input.current_source_location(), SourceLocation { line: 0, column: 1 }); - assert_eq!(input.next_including_whitespace(), Ok(&Token::Ident("fo00o".into()))); - assert_eq!(input.current_source_location(), SourceLocation { line: 1, column: 3 }); - assert_eq!(input.next_including_whitespace(), Ok(&Token::WhiteSpace(" "))); - assert_eq!(input.current_source_location(), SourceLocation { line: 1, column: 4 }); - assert_eq!(input.next_including_whitespace(), Ok(&Token::Ident("bar".into()))); - assert_eq!(input.current_source_location(), SourceLocation { line: 1, column: 7 }); - assert_eq!(input.next_including_whitespace_and_comments(), Ok(&Token::Comment("\n"))); - assert_eq!(input.current_source_location(), SourceLocation { line: 2, column: 3 }); - assert_eq!(input.next_including_whitespace(), Ok(&Token::Ident("baz".into()))); - assert_eq!(input.current_source_location(), SourceLocation { line: 2, column: 6 }); + assert_eq!( + input.current_source_location(), + SourceLocation { line: 0, column: 1 } + ); + assert_eq!( + input.next_including_whitespace(), + Ok(&Token::Ident("fo00o".into())) + ); + assert_eq!( + input.current_source_location(), + SourceLocation { line: 1, column: 3 } + ); + assert_eq!( + input.next_including_whitespace(), + Ok(&Token::WhiteSpace(" ")) + ); + assert_eq!( + input.current_source_location(), + SourceLocation { line: 1, column: 4 } + ); + assert_eq!( + input.next_including_whitespace(), + Ok(&Token::Ident("bar".into())) + ); + assert_eq!( + input.current_source_location(), + SourceLocation { line: 1, column: 7 } + ); + assert_eq!( + input.next_including_whitespace_and_comments(), + Ok(&Token::Comment("\n")) + ); + assert_eq!( + input.current_source_location(), + SourceLocation { line: 2, column: 3 } + ); + assert_eq!( + input.next_including_whitespace(), + Ok(&Token::Ident("baz".into())) + ); + assert_eq!( + input.current_source_location(), + SourceLocation { line: 2, column: 6 } + ); let state = input.state(); - assert_eq!(input.next_including_whitespace(), Ok(&Token::WhiteSpace("\r\n\n"))); - assert_eq!(input.current_source_location(), SourceLocation { line: 4, column: 1 }); - - assert_eq!(state.source_location(), SourceLocation { line: 2, column: 6 }); - - assert_eq!(input.next_including_whitespace(), Ok(&Token::UnquotedUrl("u".into()))); - assert_eq!(input.current_source_location(), SourceLocation { line: 6, column: 2 }); - - assert_eq!(input.next_including_whitespace(), Ok(&Token::QuotedString("ab".into()))); - assert_eq!(input.current_source_location(), SourceLocation { line: 7, column: 3 }); + assert_eq!( + input.next_including_whitespace(), + Ok(&Token::WhiteSpace("\r\n\n")) + ); + assert_eq!( + input.current_source_location(), + SourceLocation { line: 4, column: 1 } + ); + + assert_eq!( + state.source_location(), + SourceLocation { line: 2, column: 6 } + ); + + assert_eq!( + input.next_including_whitespace(), + Ok(&Token::UnquotedUrl("u".into())) + ); + assert_eq!( + input.current_source_location(), + SourceLocation { line: 6, column: 2 } + ); + + assert_eq!( + input.next_including_whitespace(), + Ok(&Token::QuotedString("ab".into())) + ); + assert_eq!( + input.current_source_location(), + SourceLocation { line: 7, column: 3 } + ); assert!(input.next_including_whitespace().is_err()); } #[test] fn overflow() { - use std::iter::repeat; use std::f32; + use std::iter::repeat; let css = r" 2147483646 @@ -551,7 +655,8 @@ fn overflow() { -3.40282347e+38 -3.402824e+38 - ".replace("{309 zeros}", &repeat('0').take(309).collect::()); + " + .replace("{309 
zeros}", &repeat('0').take(309).collect::()); let mut input = ParserInput::new(&css); let mut input = Parser::new(&mut input); @@ -586,9 +691,11 @@ fn line_delimited() { let mut input = Parser::new(&mut input); assert_eq!(input.next(), Ok(&Token::CurlyBracketBlock)); assert!({ - let result: Result<_, ParseError<()>> = input.parse_until_after(Delimiter::Semicolon, |_| Ok(42)); + let result: Result<_, ParseError<()>> = + input.parse_until_after(Delimiter::Semicolon, |_| Ok(42)); result - }.is_err()); + } + .is_err()); assert_eq!(input.next(), Ok(&Token::Comma)); assert!(input.next().is_err()); } @@ -603,9 +710,18 @@ fn identifier_serialization() { // Replacement character assert_eq!(Token::Ident("\u{FFFD}".into()).to_css_string(), "\u{FFFD}"); - assert_eq!(Token::Ident("a\u{FFFD}".into()).to_css_string(), "a\u{FFFD}"); - assert_eq!(Token::Ident("\u{FFFD}b".into()).to_css_string(), "\u{FFFD}b"); - assert_eq!(Token::Ident("a\u{FFFD}b".into()).to_css_string(), "a\u{FFFD}b"); + assert_eq!( + Token::Ident("a\u{FFFD}".into()).to_css_string(), + "a\u{FFFD}" + ); + assert_eq!( + Token::Ident("\u{FFFD}b".into()).to_css_string(), + "\u{FFFD}b" + ); + assert_eq!( + Token::Ident("a\u{FFFD}b".into()).to_css_string(), + "a\u{FFFD}b" + ); // Number prefix assert_eq!(Token::Ident("0a".into()).to_css_string(), "\\30 a"); @@ -647,35 +763,74 @@ fn identifier_serialization() { assert_eq!(Token::Ident("--a".into()).to_css_string(), "--a"); // Various tests - assert_eq!(Token::Ident("\x01\x02\x1E\x1F".into()).to_css_string(), "\\1 \\2 \\1e \\1f "); - assert_eq!(Token::Ident("\u{0080}\x2D\x5F\u{00A9}".into()).to_css_string(), "\u{0080}\x2D\x5F\u{00A9}"); + assert_eq!( + Token::Ident("\x01\x02\x1E\x1F".into()).to_css_string(), + "\\1 \\2 \\1e \\1f " + ); + assert_eq!( + Token::Ident("\u{0080}\x2D\x5F\u{00A9}".into()).to_css_string(), + "\u{0080}\x2D\x5F\u{00A9}" + ); assert_eq!(Token::Ident("\x7F\u{0080}\u{0081}\u{0082}\u{0083}\u{0084}\u{0085}\u{0086}\u{0087}\u{0088}\u{0089}\ \u{008A}\u{008B}\u{008C}\u{008D}\u{008E}\u{008F}\u{0090}\u{0091}\u{0092}\u{0093}\u{0094}\u{0095}\u{0096}\ \u{0097}\u{0098}\u{0099}\u{009A}\u{009B}\u{009C}\u{009D}\u{009E}\u{009F}".into()).to_css_string(), "\\7f \u{0080}\u{0081}\u{0082}\u{0083}\u{0084}\u{0085}\u{0086}\u{0087}\u{0088}\u{0089}\u{008A}\u{008B}\u{008C}\ \u{008D}\u{008E}\u{008F}\u{0090}\u{0091}\u{0092}\u{0093}\u{0094}\u{0095}\u{0096}\u{0097}\u{0098}\u{0099}\ \u{009A}\u{009B}\u{009C}\u{009D}\u{009E}\u{009F}"); - assert_eq!(Token::Ident("\u{00A0}\u{00A1}\u{00A2}".into()).to_css_string(), "\u{00A0}\u{00A1}\u{00A2}"); - assert_eq!(Token::Ident("a0123456789b".into()).to_css_string(), "a0123456789b"); - assert_eq!(Token::Ident("abcdefghijklmnopqrstuvwxyz".into()).to_css_string(), "abcdefghijklmnopqrstuvwxyz"); - assert_eq!(Token::Ident("ABCDEFGHIJKLMNOPQRSTUVWXYZ".into()).to_css_string(), "ABCDEFGHIJKLMNOPQRSTUVWXYZ"); - assert_eq!(Token::Ident("\x20\x21\x78\x79".into()).to_css_string(), "\\ \\!xy"); + assert_eq!( + Token::Ident("\u{00A0}\u{00A1}\u{00A2}".into()).to_css_string(), + "\u{00A0}\u{00A1}\u{00A2}" + ); + assert_eq!( + Token::Ident("a0123456789b".into()).to_css_string(), + "a0123456789b" + ); + assert_eq!( + Token::Ident("abcdefghijklmnopqrstuvwxyz".into()).to_css_string(), + "abcdefghijklmnopqrstuvwxyz" + ); + assert_eq!( + Token::Ident("ABCDEFGHIJKLMNOPQRSTUVWXYZ".into()).to_css_string(), + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + ); + assert_eq!( + Token::Ident("\x20\x21\x78\x79".into()).to_css_string(), + "\\ \\!xy" + ); // astral symbol (U+1D306 TETRAGRAM FOR CENTRE) - 
assert_eq!(Token::Ident("\u{1D306}".into()).to_css_string(), "\u{1D306}"); + assert_eq!( + Token::Ident("\u{1D306}".into()).to_css_string(), + "\u{1D306}" + ); +} + +trait ToJson { + fn to_json(&self) -> Value; +} + +impl ToJson for T where T: Clone, Value: From { + fn to_json(&self) -> Value { + Value::from(self.clone()) + } } impl ToJson for Color { - fn to_json(&self) -> json::Json { + fn to_json(&self) -> Value { match *self { - Color::RGBA(ref rgba) => { - [rgba.red, rgba.green, rgba.blue, rgba.alpha].to_json() - }, + Color::RGBA(ref rgba) => json!([rgba.red, rgba.green, rgba.blue, rgba.alpha]), Color::CurrentColor => "currentcolor".to_json(), } } } +impl<'a> ToJson for CowRcStr<'a> { + fn to_json(&self) -> Value { + let s: &str = &*self; + s.to_json() + } +} + #[cfg(feature = "bench")] const BACKGROUND_IMAGE: &'static str = include_str!("big-data-url.css"); @@ -685,18 +840,17 @@ fn unquoted_url(b: &mut Bencher) { b.iter(|| { let mut input = ParserInput::new(BACKGROUND_IMAGE); let mut input = Parser::new(&mut input); - input.look_for_var_functions(); + input.look_for_var_or_env_functions(); - let result = input.try(|input| input.expect_url()); + let result = input.try_parse(|input| input.expect_url()); assert!(result.is_ok()); - input.seen_var_functions(); - (result.is_ok(), input.seen_var_functions()) + input.seen_var_or_env_functions(); + (result.is_ok(), input.seen_var_or_env_functions()) }) } - #[cfg(feature = "bench")] #[bench] fn numeric(b: &mut Bencher) { @@ -720,15 +874,18 @@ fn no_stack_overflow_multiple_nested_blo } let mut input = ParserInput::new(&input); let mut input = Parser::new(&mut input); - while let Ok(..) = input.next() { } + while let Ok(..) = input.next() {} } impl<'i> DeclarationParser<'i> for JsonParser { - type Declaration = Json; + type Declaration = Value; type Error = (); - fn parse_value<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>) - -> Result> { + fn parse_value<'t>( + &mut self, + name: CowRcStr<'i>, + input: &mut Parser<'i, 't>, + ) -> Result> { let mut value = vec![]; let mut important = false; loop { @@ -743,7 +900,7 @@ impl<'i> DeclarationParser<'i> for JsonP if parse_important(input).is_ok() { if input.is_exhausted() { important = true; - break + break; } } input.reset(&start); @@ -751,30 +908,28 @@ impl<'i> DeclarationParser<'i> for JsonP } value.push(one_component_value_to_json(token, input)); } else { - break + break; } } - Ok(JArray![ - "declaration", - name, - value, - important, - ]) + Ok(JArray!["declaration", name, value, important,]) } } impl<'i> AtRuleParser<'i> for JsonParser { - type PreludeNoBlock = Vec; - type PreludeBlock = Vec; - type AtRule = Json; + type PreludeNoBlock = Vec; + type PreludeBlock = Vec; + type AtRule = Value; type Error = (); - fn parse_prelude<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>) - -> Result, Vec>, ParseError<'i, ()>> { + fn parse_prelude<'t>( + &mut self, + name: CowRcStr<'i>, + input: &mut Parser<'i, 't>, + ) -> Result, Vec>, ParseError<'i, ()>> { let prelude = vec![ "at-rule".to_json(), name.to_json(), - Json::Array(component_values_to_json(input)), + Value::Array(component_values_to_json(input)), ]; match_ignore_ascii_case! 
{ &*name, "media" | "foo-with-block" => Ok(AtRuleType::WithBlock(prelude)), @@ -785,41 +940,40 @@ impl<'i> AtRuleParser<'i> for JsonParser } } - fn rule_without_block( - &mut self, - mut prelude: Vec, - _location: SourceLocation, - ) -> Json { - prelude.push(Json::Null); - Json::Array(prelude) + fn rule_without_block(&mut self, mut prelude: Vec, _location: SourceLocation) -> Value { + prelude.push(Value::Null); + Value::Array(prelude) } fn parse_block<'t>( &mut self, - mut prelude: Vec, + mut prelude: Vec, _location: SourceLocation, input: &mut Parser<'i, 't>, - ) -> Result> { - prelude.push(Json::Array(component_values_to_json(input))); - Ok(Json::Array(prelude)) + ) -> Result> { + prelude.push(Value::Array(component_values_to_json(input))); + Ok(Value::Array(prelude)) } } impl<'i> QualifiedRuleParser<'i> for JsonParser { - type Prelude = Vec; - type QualifiedRule = Json; + type Prelude = Vec; + type QualifiedRule = Value; type Error = (); - fn parse_prelude<'t>(&mut self, input: &mut Parser<'i, 't>) -> Result, ParseError<'i, ()>> { + fn parse_prelude<'t>( + &mut self, + input: &mut Parser<'i, 't>, + ) -> Result, ParseError<'i, ()>> { Ok(component_values_to_json(input)) } fn parse_block<'t>( &mut self, - prelude: Vec, + prelude: Vec, _location: SourceLocation, input: &mut Parser<'i, 't>, - ) -> Result> { + ) -> Result> { Ok(JArray![ "qualified rule", prelude, @@ -828,7 +982,7 @@ impl<'i> QualifiedRuleParser<'i> for Jso } } -fn component_values_to_json(input: &mut Parser) -> Vec { +fn component_values_to_json(input: &mut Parser) -> Vec { let mut values = vec![]; while let Ok(token) = input.next_including_whitespace().map(|t| t.clone()) { values.push(one_component_value_to_json(token, input)); @@ -836,23 +990,31 @@ fn component_values_to_json(input: &mut values } -fn one_component_value_to_json(token: Token, input: &mut Parser) -> Json { - fn numeric(value: f32, int_value: Option, has_sign: bool) -> Vec { +fn one_component_value_to_json(token: Token, input: &mut Parser) -> Value { + fn numeric(value: f32, int_value: Option, has_sign: bool) -> Vec { vec![ Token::Number { value: value, int_value: int_value, has_sign: has_sign, - }.to_css_string().to_json(), - match int_value { Some(i) => i.to_json(), None => value.to_json() }, - match int_value { Some(_) => "integer", None => "number" }.to_json() + } + .to_css_string() + .to_json(), + match int_value { + Some(i) => i.to_json(), + None => value.to_json(), + }, + match int_value { + Some(_) => "integer", + None => "number", + } + .to_json(), ] } - fn nested(input: &mut Parser) -> Vec { - let result: Result<_, ParseError<()>> = input.parse_nested_block(|input| { - Ok(component_values_to_json(input)) - }); + fn nested(input: &mut Parser) -> Vec { + let result: Result<_, ParseError<()>> = + input.parse_nested_block(|input| Ok(component_values_to_json(input))); result.unwrap() } @@ -866,17 +1028,30 @@ fn one_component_value_to_json(token: To Token::Delim('\\') => "\\".to_json(), Token::Delim(value) => value.to_string().to_json(), - Token::Number { value, int_value, has_sign } => Json::Array({ + Token::Number { + value, + int_value, + has_sign, + } => Value::Array({ let mut v = vec!["number".to_json()]; v.extend(numeric(value, int_value, has_sign)); v }), - Token::Percentage { unit_value, int_value, has_sign } => Json::Array({ + Token::Percentage { + unit_value, + int_value, + has_sign, + } => Value::Array({ let mut v = vec!["percentage".to_json()]; v.extend(numeric(unit_value * 100., int_value, has_sign)); v }), - Token::Dimension { value, 
int_value, has_sign, unit } => Json::Array({ + Token::Dimension { + value, + int_value, + has_sign, + unit, + } => Value::Array({ let mut v = vec!["dimension".to_json()]; v.extend(numeric(value, int_value, has_sign)); v.push(unit.to_json()); @@ -896,22 +1071,22 @@ fn one_component_value_to_json(token: To Token::CDO => "".to_json(), - Token::Function(name) => Json::Array({ + Token::Function(name) => Value::Array({ let mut v = vec!["function".to_json(), name.to_json()]; v.extend(nested(input)); v }), - Token::ParenthesisBlock => Json::Array({ + Token::ParenthesisBlock => Value::Array({ let mut v = vec!["()".to_json()]; v.extend(nested(input)); v }), - Token::SquareBracketBlock => Json::Array({ + Token::SquareBracketBlock => Value::Array({ let mut v = vec!["[]".to_json()]; v.extend(nested(input)); v }), - Token::CurlyBracketBlock => Json::Array({ + Token::CurlyBracketBlock => Value::Array({ let mut v = vec!["{}".to_json()]; v.extend(nested(input)); v @@ -955,12 +1130,13 @@ fn parse_until_before_stops_at_delimiter // For all j and k, inputs[i].1[j] should parse the same as inputs[i].1[k] // when we use delimiters inputs[i].0. let inputs = vec![ - (Delimiter::Bang | Delimiter::Semicolon, - // Note that the ';extra' is fine, because the ';' acts the same as - // the end of input. - vec!["token stream;extra", "token stream!", "token stream"]), - (Delimiter::Bang | Delimiter::Semicolon, - vec![";", "!", ""]), + ( + Delimiter::Bang | Delimiter::Semicolon, + // Note that the ';extra' is fine, because the ';' acts the same as + // the end of input. + vec!["token stream;extra", "token stream!", "token stream"], + ), + (Delimiter::Bang | Delimiter::Semicolon, vec![";", "!", ""]), ]; for equivalent in inputs { for (j, x) in equivalent.1.iter().enumerate() { @@ -978,7 +1154,7 @@ fn parse_until_before_stops_at_delimiter let oy = iy.next(); assert_eq!(ox, oy); if let Err(_) = ox { - break + break; } } Ok(()) @@ -1012,14 +1188,46 @@ fn parser_maintains_current_line() { fn parser_with_line_number_offset() { let mut input = ParserInput::new_with_line_number_offset("ident\nident", 72); let mut parser = Parser::new(&mut input); - assert_eq!(parser.current_source_location(), SourceLocation { line: 72, column: 1 }); - assert_eq!(parser.next_including_whitespace_and_comments(), Ok(&Token::Ident("ident".into()))); - assert_eq!(parser.current_source_location(), SourceLocation { line: 72, column: 6 }); - assert_eq!(parser.next_including_whitespace_and_comments(), - Ok(&Token::WhiteSpace("\n".into()))); - assert_eq!(parser.current_source_location(), SourceLocation { line: 73, column: 1 }); - assert_eq!(parser.next_including_whitespace_and_comments(), Ok(&Token::Ident("ident".into()))); - assert_eq!(parser.current_source_location(), SourceLocation { line: 73, column: 6 }); + assert_eq!( + parser.current_source_location(), + SourceLocation { + line: 72, + column: 1 + } + ); + assert_eq!( + parser.next_including_whitespace_and_comments(), + Ok(&Token::Ident("ident".into())) + ); + assert_eq!( + parser.current_source_location(), + SourceLocation { + line: 72, + column: 6 + } + ); + assert_eq!( + parser.next_including_whitespace_and_comments(), + Ok(&Token::WhiteSpace("\n".into())) + ); + assert_eq!( + parser.current_source_location(), + SourceLocation { + line: 73, + column: 1 + } + ); + assert_eq!( + parser.next_including_whitespace_and_comments(), + Ok(&Token::Ident("ident".into())) + ); + assert_eq!( + parser.current_source_location(), + SourceLocation { + line: 73, + column: 6 + } + ); } #[test] @@ -1028,23 +1236,31 
@@ fn cdc_regression_test() { let mut parser = Parser::new(&mut input); parser.skip_cdc_and_cdo(); assert_eq!(parser.next(), Ok(&Token::Ident("x".into()))); - assert_eq!(parser.next(), Err(BasicParseError { - kind: BasicParseErrorKind::EndOfInput, - location: SourceLocation { line: 0, column: 5 } - })); + assert_eq!( + parser.next(), + Err(BasicParseError { + kind: BasicParseErrorKind::EndOfInput, + location: SourceLocation { line: 0, column: 5 } + }) + ); } #[test] fn parse_entirely_reports_first_error() { #[derive(PartialEq, Debug)] - enum E { Foo } + enum E { + Foo, + } let mut input = ParserInput::new("ident"); let mut parser = Parser::new(&mut input); let result: Result<(), _> = parser.parse_entirely(|p| Err(p.new_custom_error(E::Foo))); - assert_eq!(result, Err(ParseError { - kind: ParseErrorKind::Custom(E::Foo), - location: SourceLocation { line: 0, column: 1 }, - })); + assert_eq!( + result, + Err(ParseError { + kind: ParseErrorKind::Custom(E::Foo), + location: SourceLocation { line: 0, column: 1 }, + }) + ); } #[test] @@ -1053,21 +1269,23 @@ fn parse_sourcemapping_comments() { ("/*# sourceMappingURL=here*/", Some("here")), ("/*# sourceMappingURL=here */", Some("here")), ("/*@ sourceMappingURL=here*/", Some("here")), - ("/*@ sourceMappingURL=there*/ /*# sourceMappingURL=here*/", Some("here")), + ( + "/*@ sourceMappingURL=there*/ /*# sourceMappingURL=here*/", + Some("here"), + ), ("/*# sourceMappingURL=here there */", Some("here")), ("/*# sourceMappingURL= here */", Some("")), ("/*# sourceMappingURL=*/", Some("")), ("/*# sourceMappingUR=here */", None), ("/*! sourceMappingURL=here */", None), ("/*# sourceMappingURL = here */", None), - ("/* # sourceMappingURL=here */", None) + ("/* # sourceMappingURL=here */", None), ]; for test in tests { let mut input = ParserInput::new(test.0); let mut parser = Parser::new(&mut input); - while let Ok(_) = parser.next_including_whitespace() { - } + while let Ok(_) = parser.next_including_whitespace() {} assert_eq!(parser.current_source_map_url(), test.1); } } @@ -1085,14 +1303,13 @@ fn parse_sourceurl_comments() { ("/*# sourceMappingUR=here */", None), ("/*! sourceURL=here */", None), ("/*# sourceURL = here */", None), - ("/* # sourceURL=here */", None) + ("/* # sourceURL=here */", None), ]; for test in tests { let mut input = ParserInput::new(test.0); let mut parser = Parser::new(&mut input); - while let Ok(_) = parser.next_including_whitespace() { - } + while let Ok(_) = parser.next_including_whitespace() {} assert_eq!(parser.current_source_url(), test.1); } } @@ -1158,8 +1375,15 @@ fn utf16_columns() { // Read all tokens. loop { match parser.next() { - Err(BasicParseError { kind: BasicParseErrorKind::EndOfInput, .. }) => { break; } - Err(_) => { assert!(false); } + Err(BasicParseError { + kind: BasicParseErrorKind::EndOfInput, + .. + }) => { + break; + } + Err(_) => { + assert!(false); + } Ok(_) => {} }; } --- a/vendor/cssparser/src/tokenizer.rs +++ b/vendor/cssparser/src/tokenizer.rs @@ -4,15 +4,13 @@ // https://drafts.csswg.org/css-syntax/#tokenization -use std::ops::Range; use std::char; -#[allow(unused_imports)] use std::ascii::AsciiExt; use std::i32; +use std::ops::Range; -use parser::ParserState; -use cow_rc_str::CowRcStr; use self::Token::*; - +use cow_rc_str::CowRcStr; +use parser::ParserState; /// One of the pieces the CSS input is broken into. /// @@ -20,7 +18,6 @@ use self::Token::*; /// and avoid allocating/copying when possible. 
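// Orientation for the Token enum below: callers rarely construct tokens by hand;
// they pull them from a Parser. A minimal driver using the public API shown
// throughout this patch (illustrative, not part of the vendored diff):
use cssparser::{Parser, ParserInput, Token};

fn first_token_is_ident(css: &str) -> bool {
    let mut input = ParserInput::new(css);
    let mut parser = Parser::new(&mut input);
    matches!(parser.next(), Ok(&Token::Ident(_)))
}

// first_token_is_ident("color: red") == true, since "color" is an <ident-token>.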
#[derive(PartialEq, Debug, Clone)] pub enum Token<'a> { - /// A [``](https://drafts.csswg.org/css-syntax/#ident-token-diagram) Ident(CowRcStr<'a>), @@ -37,7 +34,7 @@ pub enum Token<'a> { /// A [``](https://drafts.csswg.org/css-syntax/#hash-token-diagram) with the type flag set to "id" /// /// The value does not include the `#` marker. - IDHash(CowRcStr<'a>), // Hash that is a valid ID selector. + IDHash(CowRcStr<'a>), // Hash that is a valid ID selector. /// A [``](https://drafts.csswg.org/css-syntax/#string-token-diagram) /// @@ -94,7 +91,7 @@ pub enum Token<'a> { int_value: Option, /// The unit, e.g. "px" in `12px` - unit: CowRcStr<'a> + unit: CowRcStr<'a>, }, /// A [``](https://drafts.csswg.org/css-syntax/#whitespace-token-diagram) @@ -109,13 +106,13 @@ pub enum Token<'a> { Comment(&'a str), /// A `:` `` - Colon, // : + Colon, // : /// A `;` `` - Semicolon, // ; + Semicolon, // ; /// A `,` `` - Comma, // , + Comma, // , /// A `~=` [``](https://drafts.csswg.org/css-syntax/#include-match-token-diagram) IncludeMatch, @@ -181,7 +178,6 @@ pub enum Token<'a> { CloseCurlyBracket, } - impl<'a> Token<'a> { /// Return whether this token represents a parse error. /// @@ -197,7 +193,6 @@ impl<'a> Token<'a> { } } - #[derive(Clone)] pub struct Tokenizer<'a> { input: &'a str, @@ -208,7 +203,7 @@ pub struct Tokenizer<'a> { /// of UTF-16 characters. current_line_start_position: usize, current_line_number: u32, - var_functions: SeenStatus, + var_or_env_functions: SeenStatus, source_map_url: Option<&'a str>, source_url: Option<&'a str>, } @@ -220,7 +215,6 @@ enum SeenStatus { SeenAtLeastOne, } - impl<'a> Tokenizer<'a> { #[inline] pub fn new(input: &str) -> Tokenizer { @@ -234,29 +228,29 @@ impl<'a> Tokenizer<'a> { position: 0, current_line_start_position: 0, current_line_number: first_line_number, - var_functions: SeenStatus::DontCare, + var_or_env_functions: SeenStatus::DontCare, source_map_url: None, source_url: None, } } #[inline] - pub fn look_for_var_functions(&mut self) { - self.var_functions = SeenStatus::LookingForThem; + pub fn look_for_var_or_env_functions(&mut self) { + self.var_or_env_functions = SeenStatus::LookingForThem; } #[inline] - pub fn seen_var_functions(&mut self) -> bool { - let seen = self.var_functions == SeenStatus::SeenAtLeastOne; - self.var_functions = SeenStatus::DontCare; + pub fn seen_var_or_env_functions(&mut self) -> bool { + let seen = self.var_or_env_functions == SeenStatus::SeenAtLeastOne; + self.var_or_env_functions = SeenStatus::DontCare; seen } #[inline] pub fn see_function(&mut self, name: &str) { - if self.var_functions == SeenStatus::LookingForThem { - if name.eq_ignore_ascii_case("var") { - self.var_functions = SeenStatus::SeenAtLeastOne; + if self.var_or_env_functions == SeenStatus::LookingForThem { + if name.eq_ignore_ascii_case("var") || name.eq_ignore_ascii_case("env") { + self.var_or_env_functions = SeenStatus::SeenAtLeastOne; } } } @@ -338,12 +332,16 @@ impl<'a> Tokenizer<'a> { // If false, `tokenizer.next_char()` will not panic. #[inline] - fn is_eof(&self) -> bool { !self.has_at_least(0) } + fn is_eof(&self) -> bool { + !self.has_at_least(0) + } // If true, the input has at least `n` bytes left *after* the current one. // That is, `tokenizer.char_at(n)` will not panic. #[inline] - fn has_at_least(&self, n: usize) -> bool { self.position + n < self.input.len() } + fn has_at_least(&self, n: usize) -> bool { + self.position + n < self.input.len() + } // Advance over N bytes in the input. 
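// The rename above (var_functions -> var_or_env_functions) extends var() detection
// to CSS env(): see_function() now flags both names. A usage sketch mirroring the
// updated bench earlier in this patch (illustrative, not part of the diff):
use cssparser::{Parser, ParserInput};

fn uses_var_or_env(css: &str) -> bool {
    let mut input = ParserInput::new(css);
    let mut parser = Parser::new(&mut input);
    parser.look_for_var_or_env_functions();
    while parser.next().is_ok() {} // walk the whole token stream
    parser.seen_var_or_env_functions()
}

// uses_var_or_env("env(safe-area-inset-top)") == true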
This function can advance // over ASCII bytes (excluding newlines), or UTF-8 sequence @@ -365,7 +363,9 @@ impl<'a> Tokenizer<'a> { // Assumes non-EOF #[inline] - fn next_byte_unchecked(&self) -> u8 { self.byte_at(0) } + fn next_byte_unchecked(&self) -> u8 { + self.byte_at(0) + } #[inline] fn byte_at(&self, offset: usize) -> u8 { @@ -433,8 +433,8 @@ impl<'a> Tokenizer<'a> { #[inline] fn has_newline_at(&self, offset: usize) -> bool { - self.position + offset < self.input.len() && - matches!(self.byte_at(offset), b'\n' | b'\r' | b'\x0C') + self.position + offset < self.input.len() + && matches!(self.byte_at(offset), b'\n' | b'\r' | b'\x0C') } #[inline] @@ -444,7 +444,9 @@ impl<'a> Tokenizer<'a> { self.position += len_utf8; // Note that due to the special case for the 4-byte sequence // intro, we must use wrapping add here. - self.current_line_start_position = self.current_line_start_position.wrapping_add(len_utf8 - c.len_utf16()); + self.current_line_start_position = self + .current_line_start_position + .wrapping_add(len_utf8 - c.len_utf16()); c } @@ -518,6 +520,13 @@ impl<'a> Tokenizer<'a> { #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)] pub struct SourcePosition(pub(crate) usize); +impl SourcePosition { + /// Returns the current byte index in the original input. + #[inline] + pub fn byte_index(&self) -> usize { + self.0 + } +} /// The line and column number for a given position within the input. #[derive(PartialEq, Eq, Debug, Clone, Copy)] @@ -530,10 +539,9 @@ pub struct SourceLocation { pub column: u32, } - fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result, ()> { if tokenizer.is_eof() { - return Err(()) + return Err(()); } let b = tokenizer.next_byte_unchecked(); let token = match_byte! { b, @@ -549,7 +557,7 @@ fn next_token<'a>(tokenizer: &mut Tokeni if is_ident_start(tokenizer) { IDHash(consume_name(tokenizer)) } else if !tokenizer.is_eof() && match tokenizer.next_byte_unchecked() { // Any other valid case here already resulted in IDHash. - b'0'...b'9' | b'-' => true, + b'0'..=b'9' | b'-' => true, _ => false, } { Hash(consume_name(tokenizer)) } else { Delim('#') } @@ -568,11 +576,11 @@ fn next_token<'a>(tokenizer: &mut Tokeni b'+' => { if ( tokenizer.has_at_least(1) - && matches!(tokenizer.byte_at(1), b'0'...b'9') + && matches!(tokenizer.byte_at(1), b'0'..=b'9') ) || ( tokenizer.has_at_least(2) && tokenizer.byte_at(1) == b'.' - && matches!(tokenizer.byte_at(2), b'0'...b'9') + && matches!(tokenizer.byte_at(2), b'0'..=b'9') ) { consume_numeric(tokenizer) } else { @@ -584,11 +592,11 @@ fn next_token<'a>(tokenizer: &mut Tokeni b'-' => { if ( tokenizer.has_at_least(1) - && matches!(tokenizer.byte_at(1), b'0'...b'9') + && matches!(tokenizer.byte_at(1), b'0'..=b'9') ) || ( tokenizer.has_at_least(2) && tokenizer.byte_at(1) == b'.' - && matches!(tokenizer.byte_at(2), b'0'...b'9') + && matches!(tokenizer.byte_at(2), b'0'..=b'9') ) { consume_numeric(tokenizer) } else if tokenizer.starts_with(b"-->") { @@ -603,7 +611,7 @@ fn next_token<'a>(tokenizer: &mut Tokeni }, b'.' 
=> { if tokenizer.has_at_least(1) - && matches!(tokenizer.byte_at(1), b'0'...b'9' + && matches!(tokenizer.byte_at(1), b'0'..=b'9' ) { consume_numeric(tokenizer) } else { @@ -619,7 +627,7 @@ fn next_token<'a>(tokenizer: &mut Tokeni Delim('/') } } - b'0'...b'9' => { consume_numeric(tokenizer) }, + b'0'..=b'9' => { consume_numeric(tokenizer) }, b':' => { tokenizer.advance(1); Colon }, b';' => { tokenizer.advance(1); Semicolon }, b'<' => { @@ -636,7 +644,7 @@ fn next_token<'a>(tokenizer: &mut Tokeni if is_ident_start(tokenizer) { AtKeyword(consume_name(tokenizer)) } else { Delim('@') } }, - b'a'...b'z' | b'A'...b'Z' | b'_' | b'\0' => { consume_ident_like(tokenizer) }, + b'a'..=b'z' | b'A'..=b'Z' | b'_' | b'\0' => { consume_ident_like(tokenizer) }, b'[' => { tokenizer.advance(1); SquareBracketBlock }, b'\\' => { if !tokenizer.has_newline_at(1) { consume_ident_like(tokenizer) } @@ -669,7 +677,6 @@ fn next_token<'a>(tokenizer: &mut Tokeni Ok(token) } - fn consume_whitespace<'a>(tokenizer: &mut Tokenizer<'a>, newline: bool) -> Token<'a> { let start_position = tokenizer.position(); if newline { @@ -694,7 +701,6 @@ fn consume_whitespace<'a>(tokenizer: &mu WhiteSpace(tokenizer.slice_from(start_position)) } - // Check for sourceMappingURL or sourceURL comments and update the // tokenizer appropriately. fn check_for_source_map<'a>(tokenizer: &mut Tokenizer<'a>, contents: &'a str) { @@ -704,9 +710,9 @@ fn check_for_source_map<'a>(tokenizer: & // If there is a source map directive, extract the URL. if contents.starts_with(directive) || contents.starts_with(directive_old) { let contents = &contents[directive.len()..]; - tokenizer.source_map_url = contents.split(|c| { - c == ' ' || c == '\t' || c == '\x0C' || c == '\r' || c == '\n' - }).next() + tokenizer.source_map_url = contents + .split(|c| c == ' ' || c == '\t' || c == '\x0C' || c == '\r' || c == '\n') + .next() } let directive = "# sourceURL="; @@ -715,14 +721,14 @@ fn check_for_source_map<'a>(tokenizer: & // If there is a source map directive, extract the URL. if contents.starts_with(directive) || contents.starts_with(directive_old) { let contents = &contents[directive.len()..]; - tokenizer.source_url = contents.split(|c| { - c == ' ' || c == '\t' || c == '\x0C' || c == '\r' || c == '\n' - }).next() + tokenizer.source_url = contents + .split(|c| c == ' ' || c == '\t' || c == '\x0C' || c == '\r' || c == '\n') + .next() } } fn consume_comment<'a>(tokenizer: &mut Tokenizer<'a>) -> &'a str { - tokenizer.advance(2); // consume "/*" + tokenizer.advance(2); // consume "/*" let start_position = tokenizer.position(); while !tokenizer.is_eof() { match_byte! { tokenizer.next_byte_unchecked(), @@ -739,8 +745,8 @@ fn consume_comment<'a>(tokenizer: &mut T b'\n' | b'\x0C' | b'\r' => { tokenizer.consume_newline(); } - b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); } - b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); } + b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); } + b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); } _ => { // ASCII or other leading byte. tokenizer.advance(1); @@ -755,21 +761,22 @@ fn consume_comment<'a>(tokenizer: &mut T fn consume_string<'a>(tokenizer: &mut Tokenizer<'a>, single_quote: bool) -> Token<'a> { match consume_quoted_string(tokenizer, single_quote) { Ok(value) => QuotedString(value), - Err(value) => BadString(value) + Err(value) => BadString(value), } } - /// Return `Err(())` on syntax error (ie. 
unescaped newline) -fn consume_quoted_string<'a>(tokenizer: &mut Tokenizer<'a>, single_quote: bool) - -> Result, CowRcStr<'a>> { - tokenizer.advance(1); // Skip the initial quote - // start_pos is at code point boundary, after " or ' +fn consume_quoted_string<'a>( + tokenizer: &mut Tokenizer<'a>, + single_quote: bool, +) -> Result, CowRcStr<'a>> { + tokenizer.advance(1); // Skip the initial quote + // start_pos is at code point boundary, after " or ' let start_pos = tokenizer.position(); let mut string_bytes; loop { if tokenizer.is_eof() { - return Ok(tokenizer.slice_from(start_pos).into()) + return Ok(tokenizer.slice_from(start_pos).into()); } match_byte! { tokenizer.next_byte_unchecked(), b'"' => { @@ -800,8 +807,8 @@ fn consume_quoted_string<'a>(tokenizer: b'\n' | b'\r' | b'\x0C' => { return Err(tokenizer.slice_from(start_pos).into()) }, - b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); } - b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); } + b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); } + b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); } _ => { // ASCII or other leading byte. tokenizer.advance(1); @@ -852,8 +859,8 @@ fn consume_quoted_string<'a>(tokenizer: string_bytes.extend("\u{FFFD}".as_bytes()); continue; } - b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); } - b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); } + b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); } + b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); } _ => { // ASCII or other leading byte. tokenizer.advance(1); @@ -867,30 +874,29 @@ fn consume_quoted_string<'a>(tokenizer: Ok( // string_bytes is well-formed UTF-8, see other comments. - unsafe { from_utf8_release_unchecked(string_bytes) }.into() + unsafe { from_utf8_release_unchecked(string_bytes) }.into(), ) } - #[inline] fn is_ident_start(tokenizer: &mut Tokenizer) -> bool { - !tokenizer.is_eof() && match_byte! { tokenizer.next_byte_unchecked(), - b'a'...b'z' | b'A'...b'Z' | b'_' | b'\0' => { true }, - b'-' => { - tokenizer.has_at_least(1) && match_byte! { tokenizer.byte_at(1), - b'a'...b'z' | b'A'...b'Z' | b'-' | b'_' | b'\0' => { - true + !tokenizer.is_eof() + && match_byte! { tokenizer.next_byte_unchecked(), + b'a'..=b'z' | b'A'..=b'Z' | b'_' | b'\0' => { true }, + b'-' => { + tokenizer.has_at_least(1) && match_byte! { tokenizer.byte_at(1), + b'a'..=b'z' | b'A'..=b'Z' | b'-' | b'_' | b'\0' => { + true + } + b'\\' => { !tokenizer.has_newline_at(1) } + b => { !b.is_ascii() }, } - b'\\' => { !tokenizer.has_newline_at(1) } - b => { !b.is_ascii() }, - } - }, - b'\\' => { !tokenizer.has_newline_at(1) }, - b => { !b.is_ascii() }, - } + }, + b'\\' => { !tokenizer.has_newline_at(1) }, + b => { !b.is_ascii() }, + } } - fn consume_ident_like<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> { let value = consume_name(tokenizer); if !tokenizer.is_eof() && tokenizer.next_byte_unchecked() == b'(' { @@ -912,10 +918,10 @@ fn consume_name<'a>(tokenizer: &mut Toke let mut value_bytes; loop { if tokenizer.is_eof() { - return tokenizer.slice_from(start_pos).into() + return tokenizer.slice_from(start_pos).into(); } match_byte! { tokenizer.next_byte_unchecked(), - b'a'...b'z' | b'A'...b'Z' | b'0'...b'9' | b'_' | b'-' => { tokenizer.advance(1) }, + b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' | b'_' | b'-' => { tokenizer.advance(1) }, b'\\' | b'\0' => { // * The tokenizer’s input is UTF-8 since it’s `&str`. 
// * start_pos is at a code point boundary @@ -925,10 +931,10 @@ fn consume_name<'a>(tokenizer: &mut Toke value_bytes = tokenizer.slice_from(start_pos).as_bytes().to_owned(); break } - b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); } - b'\xC0'...b'\xEF' => { tokenizer.advance(1); } - b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); } - b => { + b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); } + b'\xC0'..=b'\xEF' => { tokenizer.advance(1); } + b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); } + _b => { return tokenizer.slice_from(start_pos).into(); } } @@ -937,7 +943,7 @@ fn consume_name<'a>(tokenizer: &mut Toke while !tokenizer.is_eof() { let b = tokenizer.next_byte_unchecked(); match_byte! { b, - b'a'...b'z' | b'A'...b'Z' | b'0'...b'9' | b'_' | b'-' => { + b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' | b'_' | b'-' => { tokenizer.advance(1); value_bytes.push(b) // ASCII } @@ -951,19 +957,19 @@ fn consume_name<'a>(tokenizer: &mut Toke tokenizer.advance(1); value_bytes.extend("\u{FFFD}".as_bytes()); }, - b'\x80'...b'\xBF' => { + b'\x80'..=b'\xBF' => { // This byte *is* part of a multi-byte code point, // we’ll end up copying the whole code point before this loop does something else. tokenizer.consume_continuation_byte(); value_bytes.push(b) } - b'\xC0'...b'\xEF' => { + b'\xC0'..=b'\xEF' => { // This byte *is* part of a multi-byte code point, // we’ll end up copying the whole code point before this loop does something else. tokenizer.advance(1); value_bytes.push(b) } - b'\xF0'...b'\xFF' => { + b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); value_bytes.push(b) } @@ -979,9 +985,9 @@ fn consume_name<'a>(tokenizer: &mut Toke fn byte_to_hex_digit(b: u8) -> Option { Some(match_byte! { b, - b'0' ... b'9' => { b - b'0' }, - b'a' ... b'f' => { b - b'a' + 10 }, - b'A' ... b'F' => { b - b'A' + 10 }, + b'0' ..= b'9' => { b - b'0' }, + b'a' ..= b'f' => { b - b'a' + 10 }, + b'A' ..= b'F' => { b - b'A' + 10 }, _ => { return None } @@ -1017,37 +1023,37 @@ fn consume_numeric<'a>(tokenizer: &mut T integral_part = integral_part * 10. + digit as f64; tokenizer.advance(1); if tokenizer.is_eof() { - break + break; } } let mut is_integer = true; let mut fractional_part: f64 = 0.; - if tokenizer.has_at_least(1) && tokenizer.next_byte_unchecked() == b'.' - && matches!(tokenizer.byte_at(1), b'0'...b'9') { + if tokenizer.has_at_least(1) + && tokenizer.next_byte_unchecked() == b'.' + && matches!(tokenizer.byte_at(1), b'0'..=b'9') + { is_integer = false; - tokenizer.advance(1); // Consume '.' + tokenizer.advance(1); // Consume '.' 
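// Where the arithmetic in consume_numeric ends up: one of three public token
// shapes. A classification sketch via the public API (illustrative, not part of
// the vendored diff):
use cssparser::{Parser, ParserInput, Token};

fn classify_numeric(css: &str) -> &'static str {
    let mut input = ParserInput::new(css);
    let mut parser = Parser::new(&mut input);
    match parser.next() {
        Ok(&Token::Number { .. }) => "number",         // e.g. "1.5e2"
        Ok(&Token::Percentage { .. }) => "percentage", // e.g. "50%"
        Ok(&Token::Dimension { .. }) => "dimension",   // e.g. "12px"
        _ => "other",
    }
}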
let mut factor = 0.1; while let Some(digit) = byte_to_decimal_digit(tokenizer.next_byte_unchecked()) { fractional_part += digit as f64 * factor; factor *= 0.1; tokenizer.advance(1); if tokenizer.is_eof() { - break + break; } } } let mut value = sign * (integral_part + fractional_part); - if tokenizer.has_at_least(1) - && matches!(tokenizer.next_byte_unchecked(), b'e' | b'E') { - - if matches!(tokenizer.byte_at(1), b'0'...b'9') || - (tokenizer.has_at_least(2) - && matches!(tokenizer.byte_at(1), b'+' | b'-') - && matches!(tokenizer.byte_at(2), b'0'...b'9')) + if tokenizer.has_at_least(1) && matches!(tokenizer.next_byte_unchecked(), b'e' | b'E') { + if matches!(tokenizer.byte_at(1), b'0'..=b'9') + || (tokenizer.has_at_least(2) + && matches!(tokenizer.byte_at(1), b'+' | b'-') + && matches!(tokenizer.byte_at(2), b'0'..=b'9')) { is_integer = false; tokenizer.advance(1); @@ -1064,7 +1070,7 @@ fn consume_numeric<'a>(tokenizer: &mut T exponent = exponent * 10. + digit as f64; tokenizer.advance(1); if tokenizer.is_eof() { - break + break; } } value *= f64::powf(10., sign * exponent); @@ -1089,7 +1095,7 @@ fn consume_numeric<'a>(tokenizer: &mut T unit_value: (value / 100.) as f32, int_value: int_value, has_sign: has_sign, - } + }; } let value = value as f32; if is_ident_start(tokenizer) { @@ -1109,7 +1115,6 @@ fn consume_numeric<'a>(tokenizer: &mut T } } - #[inline] unsafe fn from_utf8_release_unchecked(string_bytes: Vec) -> String { if cfg!(debug_assertions) { @@ -1132,7 +1137,7 @@ fn consume_unquoted_url<'a>(tokenizer: & Some(item) => item, None => { tokenizer.position = tokenizer.input.len(); - break + break; } }; match_byte! { b, @@ -1174,9 +1179,9 @@ fn consume_unquoted_url<'a>(tokenizer: & if found_printable_char { // This function only consumed ASCII (whitespace) bytes, // so the current position is a code point boundary. - return Ok(consume_unquoted_url_internal(tokenizer)) + return Ok(consume_unquoted_url_internal(tokenizer)); } else { - return Ok(UnquotedUrl("".into())) + return Ok(UnquotedUrl("".into())); } fn consume_unquoted_url_internal<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> { @@ -1185,7 +1190,7 @@ fn consume_unquoted_url<'a>(tokenizer: & let mut string_bytes: Vec; loop { if tokenizer.is_eof() { - return UnquotedUrl(tokenizer.slice_from(start_pos).into()) + return UnquotedUrl(tokenizer.slice_from(start_pos).into()); } match_byte! { tokenizer.next_byte_unchecked(), b' ' | b'\t' | b'\n' | b'\r' | b'\x0C' => { @@ -1197,7 +1202,7 @@ fn consume_unquoted_url<'a>(tokenizer: & tokenizer.advance(1); return UnquotedUrl(value.into()) } - b'\x01'...b'\x08' | b'\x0B' | b'\x0E'...b'\x1F' | b'\x7F' // non-printable + b'\x01'..=b'\x08' | b'\x0B' | b'\x0E'..=b'\x1F' | b'\x7F' // non-printable | b'"' | b'\'' | b'(' => { tokenizer.advance(1); return consume_bad_url(tokenizer, start_pos) @@ -1211,8 +1216,8 @@ fn consume_unquoted_url<'a>(tokenizer: & string_bytes = tokenizer.slice_from(start_pos).as_bytes().to_owned(); break } - b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); } - b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); } + b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); } + b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); } _ => { // ASCII or other leading byte. 
tokenizer.advance(1); @@ -1231,7 +1236,7 @@ fn consume_unquoted_url<'a>(tokenizer: & tokenizer.advance(1); break; } - b'\x01'...b'\x08' | b'\x0B' | b'\x0E'...b'\x1F' | b'\x7F' // non-printable + b'\x01'..=b'\x08' | b'\x0B' | b'\x0E'..=b'\x1F' | b'\x7F' // non-printable | b'"' | b'\'' | b'(' => { tokenizer.advance(1); return consume_bad_url(tokenizer, start_pos); } @@ -1249,13 +1254,13 @@ fn consume_unquoted_url<'a>(tokenizer: & tokenizer.advance(1); string_bytes.extend("\u{FFFD}".as_bytes()); } - b'\x80'...b'\xBF' => { + b'\x80'..=b'\xBF' => { // We’ll end up copying the whole code point // before this loop does something else. tokenizer.consume_continuation_byte(); string_bytes.push(b); } - b'\xF0'...b'\xFF' => { + b'\xF0'..=b'\xFF' => { // We’ll end up copying the whole code point // before this loop does something else. tokenizer.consume_4byte_intro(); string_bytes.push(b); @@ -1272,14 +1277,15 @@ fn consume_unquoted_url<'a>(tokenizer: & } UnquotedUrl( // string_bytes is well-formed UTF-8, see other comments. - unsafe { from_utf8_release_unchecked(string_bytes) }.into() + unsafe { from_utf8_release_unchecked(string_bytes) }.into(), ) } - fn consume_url_end<'a>(tokenizer: &mut Tokenizer<'a>, - start_pos: SourcePosition, - string: CowRcStr<'a>) - -> Token<'a> { + fn consume_url_end<'a>( + tokenizer: &mut Tokenizer<'a>, + start_pos: SourcePosition, + string: CowRcStr<'a>, + ) -> Token<'a> { while !tokenizer.is_eof() { match_byte! { tokenizer.next_byte_unchecked(), b')' => { @@ -1337,26 +1343,31 @@ fn consume_hex_digits<'a>(tokenizer: &mu digits += 1; tokenizer.advance(1); } - None => break + None => break, } } (value, digits) } - // Same constraints as consume_escape except it writes into `bytes` the result // instead of returning it. fn consume_escape_and_write(tokenizer: &mut Tokenizer, bytes: &mut Vec<u8>) { bytes.extend( consume_escape(tokenizer) .encode_utf8(&mut [0; 4]) .as_bytes(), ) } // Assumes that the U+005C REVERSE SOLIDUS (\) has already been consumed // and that the next input character has already been verified // to not be a newline. fn consume_escape(tokenizer: &mut Tokenizer) -> char { - if tokenizer.is_eof() { return '\u{FFFD}' } // Escaped EOF + if tokenizer.is_eof() { + return '\u{FFFD}'; + } // Escaped EOF match_byte! { tokenizer.next_byte_unchecked(), - b'0'...b'9' | b'A'...b'F' | b'a'...b'f' => { + b'0'..=b'9' | b'A'..=b'F' | b'a'..=b'f' => { let (c, _) = consume_hex_digits(tokenizer); if !tokenizer.is_eof() { match_byte! { tokenizer.next_byte_unchecked(), --- a/vendor/cssparser/src/unicode_range.rs +++ b/vendor/cssparser/src/unicode_range.rs @@ -4,15 +4,16 @@ //! https://drafts.csswg.org/css-syntax/#urange -use {Parser, ToCss, BasicParseError}; use std::char; use std::fmt; use tokenizer::Token; +use {BasicParseError, Parser, ToCss}; /// One contiguous range of code points. /// /// Can not be empty. Can represent a single code point when start == end. #[derive(PartialEq, Eq, Clone, Hash)] +#[repr(C)] pub struct UnicodeRange { /// Inclusive start of the range. In [0, end].
pub start: u32, @@ -43,7 +44,10 @@ impl UnicodeRange { let range = match parse_concatenated(concatenated_tokens.as_bytes()) { Ok(range) => range, - Err(()) => return Err(input.new_basic_unexpected_token_error(Token::Ident(concatenated_tokens.into()))), + Err(()) => { + return Err(input + .new_basic_unexpected_token_error(Token::Ident(concatenated_tokens.into()))) + } }; if range.end > char::MAX as u32 || range.start > range.end { Err(input.new_basic_unexpected_token_error(Token::Ident(concatenated_tokens.into()))) @@ -60,23 +64,21 @@ fn parse_tokens<'i, 't>(input: &mut Pars match input.next_including_whitespace()?.clone() { Token::Ident(_) => {} Token::Delim('?') => {} - t => return Err(input.new_basic_unexpected_token_error(t)) + t => return Err(input.new_basic_unexpected_token_error(t)), } parse_question_marks(input) } - Token::Dimension { .. } => { - parse_question_marks(input) - } + Token::Dimension { .. } => parse_question_marks(input), Token::Number { .. } => { let after_number = input.state(); match input.next_including_whitespace() { Ok(&Token::Delim('?')) => parse_question_marks(input), Ok(&Token::Dimension { .. }) => {} Ok(&Token::Number { .. }) => {} - _ => input.reset(&after_number) + _ => input.reset(&after_number), } } - t => return Err(input.new_basic_unexpected_token_error(t)) + t => return Err(input.new_basic_unexpected_token_error(t)), } Ok(()) } @@ -89,7 +91,7 @@ fn parse_question_marks(input: &mut Pars Ok(&Token::Delim('?')) => {} _ => { input.reset(&start); - return + return; } } } @@ -98,13 +100,13 @@ fn parse_question_marks(input: &mut Pars fn parse_concatenated(text: &[u8]) -> Result<UnicodeRange, ()> { let mut text = match text.split_first() { Some((&b'+', text)) => text, - _ => return Err(()) + _ => return Err(()), }; let (first_hex_value, hex_digit_count) = consume_hex(&mut text); let question_marks = consume_question_marks(&mut text); let consumed = hex_digit_count + question_marks; if consumed == 0 || consumed > 6 { - return Err(()) + return Err(()); } if question_marks > 0 { @@ -112,13 +114,13 @@ fn parse_concatenated(text: &[u8]) -> Re return Ok(UnicodeRange { start: first_hex_value << (question_marks * 4), end: ((first_hex_value + 1) << (question_marks * 4)) - 1, - }) + }); } } else if text.is_empty() { return Ok(UnicodeRange { start: first_hex_value, end: first_hex_value, - }) + }); } else { if let Some((&b'-', mut text)) = text.split_first() { let (second_hex_value, hex_digit_count) = consume_hex(&mut text); @@ -126,7 +128,7 @@ fn parse_concatenated(text: &[u8]) -> Re return Ok(UnicodeRange { start: first_hex_value, end: second_hex_value, - }) + }); } } } @@ -142,7 +144,7 @@ fn consume_hex(text: &mut &[u8]) -> (u32 digits += 1; *text = rest } else { - break + break; } } (value, digits) @@ -164,7 +166,10 @@ impl fmt::Debug for UnicodeRange { } impl ToCss for UnicodeRange { - fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { + fn to_css<W>(&self, dest: &mut W) -> fmt::Result + where + W: fmt::Write, + { write!(dest, "U+{:X}", self.start)?; if self.end != self.start { write!(dest, "-{:X}", self.end)?; --- /dev/null +++ b/vendor/proc-macro2-0.4.19/Cargo.toml @@ -0,0 +1,38 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g.
crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "proc-macro2" +version = "0.4.19" +authors = ["Alex Crichton "] +build = "build.rs" +description = "A stable implementation of the upcoming new `proc_macro` API. Comes with an\noption, off by default, to also reimplement itself in terms of the upstream\nunstable API.\n" +homepage = "https://github.com/alexcrichton/proc-macro2" +documentation = "https://docs.rs/proc-macro2" +readme = "README.md" +keywords = ["macros"] +license = "MIT/Apache-2.0" +repository = "https://github.com/alexcrichton/proc-macro2" +[package.metadata.docs.rs] +rustdoc-args = ["--cfg", "procmacro2_semver_exempt"] + +[lib] +doctest = false +[dependencies.unicode-xid] +version = "0.1" + +[features] +default = ["proc-macro"] +nightly = ["proc-macro"] +proc-macro = [] +[badges.travis-ci] +repository = "alexcrichton/proc-macro2" --- /dev/null +++ b/vendor/proc-macro2-0.4.19/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. --- /dev/null +++ b/vendor/proc-macro2-0.4.19/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 Alex Crichton + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. --- /dev/null +++ b/vendor/proc-macro2-0.4.19/README.md @@ -0,0 +1,82 @@ +# proc-macro2 + +[![Build Status](https://api.travis-ci.org/alexcrichton/proc-macro2.svg?branch=master)](https://travis-ci.org/alexcrichton/proc-macro2) +[![Latest Version](https://img.shields.io/crates/v/proc-macro2.svg)](https://crates.io/crates/proc-macro2) +[![Rust Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/proc-macro2) + +A small shim over the `proc_macro` crate in the compiler intended to multiplex +the stable interface as of 1.15.0 and the interface as of 1.30.0. + +New features added in Rust 1.30.0 include: + +* Span information on tokens +* No need to go in/out through strings +* Structured input/output + +Libraries ported to `proc_macro2` can retain support for older compilers while +continuing to get all the nice benefits of using a 1.30.0+ compiler. + +## Usage + +This crate compiles on all 1.15.0+ stable compilers and usage looks like: + +```toml +[dependencies] +proc-macro2 = "0.4" +``` + +followed by + +```rust +extern crate proc_macro; +extern crate proc_macro2; + +#[proc_macro_derive(MyDerive)] +pub fn my_derive(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let input: proc_macro2::TokenStream = input.into(); + + let output: proc_macro2::TokenStream = { + /* transform input */ + }; + + output.into() +} +``` + +The 1.30.0 compiler is automatically detected and its interfaces are used when +available. + +## Unstable Features + +`proc-macro2` supports exporting some methods from `proc_macro` which are +currently highly unstable, and are not stabilized in the first pass of +`proc_macro` stabilizations. These features are not exported by default. Minor +versions of `proc-macro2` may make breaking changes to them at any time. + +To enable these features, the `procmacro2_semver_exempt` config flag must be +passed to rustc. + +``` +RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build +``` + +Note that this must not only be done for your crate, but for any crate that +depends on your crate. This infectious nature is intentional, as it serves as a +reminder that you are outside of the normal semver guarantees. + +# License + +This project is licensed under either of + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or + http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or + http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in proc-macro2 by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions.
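Because `proc_macro2` mirrors the compiler's API as an ordinary library, the vendored crate can be smoke-tested outside of a procedural macro. A minimal sketch of such a check, assuming a scratch binary with `proc-macro2 = "0.4"` as a dependency (the binary is hypothetical and not part of this patch):

```rust
extern crate proc_macro2;

use proc_macro2::{TokenStream, TokenTree};

fn main() {
    // Parsing here goes through the fallback lexer vendored below in
    // src/stable.rs whenever the compiler's own proc_macro API is
    // unavailable (e.g. outside of a macro expansion).
    let tokens: TokenStream = "let answer = (6 * 7);".parse().unwrap();

    // Iteration is shallow: the parenthesized group comes back whole
    // as a single TokenTree::Group rather than being flattened.
    for tt in tokens {
        match tt {
            TokenTree::Group(g) => println!("group:   {}", g),
            TokenTree::Ident(i) => println!("ident:   {}", i),
            TokenTree::Punct(p) => println!("punct:   {}", p.as_char()),
            TokenTree::Literal(l) => println!("literal: {}", l),
        }
    }
}
```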
--- /dev/null +++ b/vendor/proc-macro2-0.4.19/build.rs @@ -0,0 +1,61 @@ +use std::env; +use std::process::Command; +use std::str; + +fn main() { + println!("cargo:rerun-if-changed=build.rs"); + + let target = env::var("TARGET").unwrap(); + + if !enable_use_proc_macro(&target) { + return; + } + println!("cargo:rustc-cfg=use_proc_macro"); + + let minor = match rustc_minor_version() { + Some(n) => n, + None => return, + }; + + // Rust 1.29 stabilized the necessary APIs in the `proc_macro` crate + if minor >= 29 || cfg!(feature = "nightly") { + println!("cargo:rustc-cfg=wrap_proc_macro"); + + if cfg!(procmacro2_semver_exempt) { + println!("cargo:rustc-cfg=super_unstable"); + } + } + + if minor == 29 { + println!("cargo:rustc-cfg=slow_extend"); + } +} + +fn enable_use_proc_macro(target: &str) -> bool { + // wasm targets don't have the `proc_macro` crate, disable this feature. + if target.contains("wasm32") { + return false; + } + + // Otherwise, only enable it if our feature is actually enabled. + cfg!(feature = "proc-macro") +} + +fn rustc_minor_version() -> Option<u32> { + macro_rules! otry { + ($e:expr) => { + match $e { + Some(e) => e, + None => return None, + } + }; + } + let rustc = otry!(env::var_os("RUSTC")); + let output = otry!(Command::new(rustc).arg("--version").output().ok()); + let version = otry!(str::from_utf8(&output.stdout).ok()); + let mut pieces = version.split('.'); + if pieces.next() != Some("rustc 1") { + return None; + } + otry!(pieces.next()).parse().ok() +} --- /dev/null +++ b/vendor/proc-macro2-0.4.19/src/lib.rs @@ -0,0 +1,1080 @@ +//! A "shim crate" intended to multiplex the [`proc_macro`] API onto stable +//! Rust. +//! +//! Procedural macros in Rust operate over the upstream +//! [`proc_macro::TokenStream`][ts] type. This type currently is quite +//! conservative and exposes no internal implementation details. Nightly +//! compilers, however, contain a much richer interface. This richer interface +//! allows fine-grained inspection of the token stream which avoids +//! stringification/re-lexing and also preserves span information. +//! +//! The upcoming APIs added to [`proc_macro`] upstream are the foundation for +//! productive procedural macros in the ecosystem. To help prepare the ecosystem +//! for using them, this crate compiles on both stable and nightly and +//! mirrors the API-to-be. The intention is that procedural macros which switch +//! to use this crate will be trivially able to switch to the upstream +//! `proc_macro` crate once its API stabilizes. +//! +//! In the meantime this crate also has a `nightly` Cargo feature which +//! enables it to reimplement itself with the unstable API of [`proc_macro`]. +//! This'll allow immediate usage of the beneficial upstream API, particularly +//! around preserving span information. +//! +//! # Unstable Features +//! +//! `proc-macro2` supports exporting some methods from `proc_macro` which are +//! currently highly unstable, and may not be stabilized in the first pass of +//! `proc_macro` stabilizations. These features are not exported by default. +//! Minor versions of `proc-macro2` may make breaking changes to them at any +//! time. +//! +//! To enable these features, the `procmacro2_semver_exempt` config flag must be +//! passed to rustc. +//! +//! ```sh +//! RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build +//! ``` +//! +//! Note that this must not only be done for your crate, but for any crate that +//! depends on your crate. This infectious nature is intentional, as it serves +//!
as a reminder that you are outside of the normal semver guarantees. +//! +//! [`proc_macro`]: https://doc.rust-lang.org/proc_macro/ +//! [ts]: https://doc.rust-lang.org/proc_macro/struct.TokenStream.html + +// Proc-macro2 types in rustdoc of other crates get linked to here. +#![doc(html_root_url = "https://docs.rs/proc-macro2/0.4.19")] +#![cfg_attr( + super_unstable, + feature(proc_macro_raw_ident, proc_macro_span) +)] + +#[cfg(use_proc_macro)] +extern crate proc_macro; +extern crate unicode_xid; + +use std::cmp::Ordering; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::iter::FromIterator; +use std::marker; +use std::rc::Rc; +use std::str::FromStr; + +#[macro_use] +mod strnom; +mod stable; + +#[cfg(not(wrap_proc_macro))] +use stable as imp; +#[path = "unstable.rs"] +#[cfg(wrap_proc_macro)] +mod imp; + +/// An abstract stream of tokens, or more concretely a sequence of token trees. +/// +/// This type provides interfaces for iterating over token trees and for +/// collecting token trees into one stream. +/// +/// Token stream is both the input and output of `#[proc_macro]`, +/// `#[proc_macro_attribute]` and `#[proc_macro_derive]` definitions. +#[derive(Clone)] +pub struct TokenStream { + inner: imp::TokenStream, + _marker: marker::PhantomData<Rc<()>>, +} + +/// Error returned from `TokenStream::from_str`. +pub struct LexError { + inner: imp::LexError, + _marker: marker::PhantomData<Rc<()>>, +} + +impl TokenStream { + fn _new(inner: imp::TokenStream) -> TokenStream { + TokenStream { + inner: inner, + _marker: marker::PhantomData, + } + } + + fn _new_stable(inner: stable::TokenStream) -> TokenStream { + TokenStream { + inner: inner.into(), + _marker: marker::PhantomData, + } + } + + /// Returns an empty `TokenStream` containing no token trees. + pub fn new() -> TokenStream { + TokenStream::_new(imp::TokenStream::new()) + } + + #[deprecated(since = "0.4.4", note = "please use TokenStream::new")] + pub fn empty() -> TokenStream { + TokenStream::new() + } + + /// Checks if this `TokenStream` is empty. + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } +} + +/// `TokenStream::default()` returns an empty stream, +/// i.e. this is equivalent to `TokenStream::new()`. +impl Default for TokenStream { + fn default() -> Self { + TokenStream::new() + } +} + +/// Attempts to break the string into tokens and parse those tokens into a token +/// stream. +/// +/// May fail for a number of reasons, for example, if the string contains +/// unbalanced delimiters or characters not existing in the language. +/// +/// NOTE: Some errors may cause panics instead of returning `LexError`. We +/// reserve the right to change these errors into `LexError`s later. +impl FromStr for TokenStream { + type Err = LexError; + + fn from_str(src: &str) -> Result<TokenStream, LexError> { + let e = src.parse().map_err(|e| LexError { + inner: e, + _marker: marker::PhantomData, + })?; + Ok(TokenStream::_new(e)) + } +} + +#[cfg(use_proc_macro)] +impl From<proc_macro::TokenStream> for TokenStream { + fn from(inner: proc_macro::TokenStream) -> TokenStream { + TokenStream::_new(inner.into()) + } +} + +#[cfg(use_proc_macro)] +impl From<TokenStream> for proc_macro::TokenStream { + fn from(inner: TokenStream) -> proc_macro::TokenStream { + inner.inner.into() + } +} + +impl Extend<TokenTree> for TokenStream { + fn extend<I: IntoIterator<Item = TokenTree>>(&mut self, streams: I) { + self.inner.extend(streams) + } +} + +impl Extend<TokenStream> for TokenStream { + fn extend<I: IntoIterator<Item = TokenStream>>(&mut self, streams: I) { + self.inner + .extend(streams.into_iter().map(|stream| stream.inner)) + } +} + +/// Collects a number of token trees into a single stream.
+impl FromIterator<TokenTree> for TokenStream { + fn from_iter<I: IntoIterator<Item = TokenTree>>(streams: I) -> Self { + TokenStream::_new(streams.into_iter().collect()) + } +} +impl FromIterator<TokenStream> for TokenStream { + fn from_iter<I: IntoIterator<Item = TokenStream>>(streams: I) -> Self { + TokenStream::_new(streams.into_iter().map(|i| i.inner).collect()) + } +} + +/// Prints the token stream as a string that is supposed to be losslessly +/// convertible back into the same token stream (modulo spans), except for +/// possibly `TokenTree::Group`s with `Delimiter::None` delimiters and negative +/// numeric literals. +impl fmt::Display for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(f) + } +} + +/// Prints the token stream in a form convenient for debugging. +impl fmt::Debug for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(f) + } +} + +impl fmt::Debug for LexError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(f) + } +} + +// Returned by reference, so we can't easily wrap it. +#[cfg(procmacro2_semver_exempt)] +pub use imp::FileName; + +/// The source file of a given `Span`. +/// +/// This type is semver exempt and not exposed by default. +#[cfg(procmacro2_semver_exempt)] +#[derive(Clone, PartialEq, Eq)] +pub struct SourceFile(imp::SourceFile); + +#[cfg(procmacro2_semver_exempt)] +impl SourceFile { + /// Get the path to this source file. + /// + /// ### Note + /// + /// If the code span associated with this `SourceFile` was generated by an + /// external macro, this may not be an actual path on the filesystem. Use + /// [`is_real`] to check. + /// + /// Also note that even if `is_real` returns `true`, if + /// `--remap-path-prefix` was passed on the command line, the path as given + /// may not actually be valid. + /// + /// [`is_real`]: #method.is_real + pub fn path(&self) -> &FileName { + self.0.path() + } + + /// Returns `true` if this source file is a real source file, and not + /// generated by an external macro's expansion. + pub fn is_real(&self) -> bool { + self.0.is_real() + } +} + +#[cfg(procmacro2_semver_exempt)] +impl AsRef<FileName> for SourceFile { + fn as_ref(&self) -> &FileName { + self.0.path() + } +} + +#[cfg(procmacro2_semver_exempt)] +impl fmt::Debug for SourceFile { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +/// A line-column pair representing the start or end of a `Span`. +/// +/// This type is semver exempt and not exposed by default. +#[cfg(procmacro2_semver_exempt)] +pub struct LineColumn { + /// The 1-indexed line in the source file on which the span starts or ends + /// (inclusive). + pub line: usize, + /// The 0-indexed column (in UTF-8 characters) in the source file on which + /// the span starts or ends (inclusive). + pub column: usize, +} + +/// A region of source code, along with macro expansion information. +#[derive(Copy, Clone)] +pub struct Span { + inner: imp::Span, + _marker: marker::PhantomData<Rc<()>>, +} + +impl Span { + fn _new(inner: imp::Span) -> Span { + Span { + inner: inner, + _marker: marker::PhantomData, + } + } + + fn _new_stable(inner: stable::Span) -> Span { + Span { + inner: inner.into(), + _marker: marker::PhantomData, + } + } + + /// The span of the invocation of the current procedural macro. + /// + /// Identifiers created with this span will be resolved as if they were + /// written directly at the macro call location (call-site hygiene) and + /// other code at the macro call site will be able to refer to them as well.
+ pub fn call_site() -> Span { + Span::_new(imp::Span::call_site()) + } + + /// A span that resolves at the macro definition site. + /// + /// This method is semver exempt and not exposed by default. + #[cfg(procmacro2_semver_exempt)] + pub fn def_site() -> Span { + Span::_new(imp::Span::def_site()) + } + + /// Creates a new span with the same line/column information as `self` but + /// that resolves symbols as though it were at `other`. + /// + /// This method is semver exempt and not exposed by default. + #[cfg(procmacro2_semver_exempt)] + pub fn resolved_at(&self, other: Span) -> Span { + Span::_new(self.inner.resolved_at(other.inner)) + } + + /// Creates a new span with the same name resolution behavior as `self` but + /// with the line/column information of `other`. + /// + /// This method is semver exempt and not exposed by default. + #[cfg(procmacro2_semver_exempt)] + pub fn located_at(&self, other: Span) -> Span { + Span::_new(self.inner.located_at(other.inner)) + } + + /// This method is only available when the `"nightly"` feature is enabled. + #[doc(hidden)] + #[cfg(any(feature = "nightly", super_unstable))] + pub fn unstable(self) -> proc_macro::Span { + self.inner.unstable() + } + + /// The original source file into which this span points. + /// + /// This method is semver exempt and not exposed by default. + #[cfg(procmacro2_semver_exempt)] + pub fn source_file(&self) -> SourceFile { + SourceFile(self.inner.source_file()) + } + + /// Get the starting line/column in the source file for this span. + /// + /// This method is semver exempt and not exposed by default. + #[cfg(procmacro2_semver_exempt)] + pub fn start(&self) -> LineColumn { + let imp::LineColumn { line, column } = self.inner.start(); + LineColumn { + line: line, + column: column, + } + } + + /// Get the ending line/column in the source file for this span. + /// + /// This method is semver exempt and not exposed by default. + #[cfg(procmacro2_semver_exempt)] + pub fn end(&self) -> LineColumn { + let imp::LineColumn { line, column } = self.inner.end(); + LineColumn { + line: line, + column: column, + } + } + + /// Create a new span encompassing `self` and `other`. + /// + /// Returns `None` if `self` and `other` are from different files. + /// + /// This method is semver exempt and not exposed by default. + #[cfg(procmacro2_semver_exempt)] + pub fn join(&self, other: Span) -> Option<Span> { + self.inner.join(other.inner).map(Span::_new) + } + + /// Compares two spans to see if they're equal. + /// + /// This method is semver exempt and not exposed by default. + #[cfg(procmacro2_semver_exempt)] + pub fn eq(&self, other: &Span) -> bool { + self.inner.eq(&other.inner) + } +} + +/// Prints a span in a form convenient for debugging. +impl fmt::Debug for Span { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(f) + } +} + +/// A single token or a delimited sequence of token trees (e.g. `[1, (), ..]`). +#[derive(Clone)] +pub enum TokenTree { + /// A token stream surrounded by bracket delimiters. + Group(Group), + /// An identifier. + Ident(Ident), + /// A single punctuation character (`+`, `,`, `$`, etc.). + Punct(Punct), + /// A literal character (`'a'`), string (`"hello"`), number (`2.3`), etc. + Literal(Literal), +} + +impl TokenTree { + /// Returns the span of this tree, delegating to the `span` method of + /// the contained token or a delimited stream.
+ pub fn span(&self) -> Span { + match *self { + TokenTree::Group(ref t) => t.span(), + TokenTree::Ident(ref t) => t.span(), + TokenTree::Punct(ref t) => t.span(), + TokenTree::Literal(ref t) => t.span(), + } + } + + /// Configures the span for *only this token*. + /// + /// Note that if this token is a `Group` then this method will not configure + /// the span of each of the internal tokens; it will simply delegate to + /// the `set_span` method of each variant. + pub fn set_span(&mut self, span: Span) { + match *self { + TokenTree::Group(ref mut t) => t.set_span(span), + TokenTree::Ident(ref mut t) => t.set_span(span), + TokenTree::Punct(ref mut t) => t.set_span(span), + TokenTree::Literal(ref mut t) => t.set_span(span), + } + } +} + +impl From<Group> for TokenTree { + fn from(g: Group) -> TokenTree { + TokenTree::Group(g) + } +} + +impl From<Ident> for TokenTree { + fn from(g: Ident) -> TokenTree { + TokenTree::Ident(g) + } +} + +impl From<Punct> for TokenTree { + fn from(g: Punct) -> TokenTree { + TokenTree::Punct(g) + } +} + +impl From<Literal> for TokenTree { + fn from(g: Literal) -> TokenTree { + TokenTree::Literal(g) + } +} + +/// Prints the token tree as a string that is supposed to be losslessly +/// convertible back into the same token tree (modulo spans), except for +/// possibly `TokenTree::Group`s with `Delimiter::None` delimiters and negative +/// numeric literals. +impl fmt::Display for TokenTree { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + TokenTree::Group(ref t) => t.fmt(f), + TokenTree::Ident(ref t) => t.fmt(f), + TokenTree::Punct(ref t) => t.fmt(f), + TokenTree::Literal(ref t) => t.fmt(f), + } + } +} + +/// Prints token tree in a form convenient for debugging. +impl fmt::Debug for TokenTree { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // Each of these has the name in the struct type in the derived debug, + // so don't bother with an extra layer of indirection + match *self { + TokenTree::Group(ref t) => t.fmt(f), + TokenTree::Ident(ref t) => { + let mut debug = f.debug_struct("Ident"); + debug.field("sym", &format_args!("{}", t)); + #[cfg(any(feature = "nightly", procmacro2_semver_exempt))] + debug.field("span", &t.span()); + debug.finish() + } + TokenTree::Punct(ref t) => t.fmt(f), + TokenTree::Literal(ref t) => t.fmt(f), + } + } +} + +/// A delimited token stream. +/// +/// A `Group` internally contains a `TokenStream` which is surrounded by +/// `Delimiter`s. +#[derive(Clone)] +pub struct Group { + delimiter: Delimiter, + stream: TokenStream, + span: Span, +} + +/// Describes how a sequence of token trees is delimited. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum Delimiter { + /// `( ... )` + Parenthesis, + /// `{ ... }` + Brace, + /// `[ ... ]` + Bracket, + /// `Ø ... Ø` + /// + /// An implicit delimiter, that may, for example, appear around tokens + /// coming from a "macro variable" `$var`. It is important to preserve + /// operator priorities in cases like `$var * 3` where `$var` is `1 + 2`. + /// Implicit delimiters may not survive roundtrip of a token stream through + /// a string. + None, +} + +impl Group { + /// Creates a new `Group` with the given delimiter and token stream. + /// + /// This constructor will set the span for this group to + /// `Span::call_site()`. To change the span you can use the `set_span` + /// method below.
+ pub fn new(delimiter: Delimiter, stream: TokenStream) -> Group { + Group { + delimiter: delimiter, + stream: stream, + span: Span::call_site(), + } + } + + /// Returns the delimiter of this `Group`. + pub fn delimiter(&self) -> Delimiter { + self.delimiter + } + + /// Returns the `TokenStream` of tokens that are delimited in this `Group`. + /// + /// Note that the returned token stream does not include the delimiter + /// returned above. + pub fn stream(&self) -> TokenStream { + self.stream.clone() + } + + /// Returns the span for the delimiters of this token stream, spanning the + /// entire `Group`. + pub fn span(&self) -> Span { + self.span + } + + /// Configures the span for this `Group`'s delimiters, but not its internal + /// tokens. + /// + /// This method will **not** set the span of all the internal tokens spanned + /// by this group, but rather it will only set the span of the delimiter + /// tokens at the level of the `Group`. + pub fn set_span(&mut self, span: Span) { + self.span = span; + } +} + +/// Prints the group as a string that should be losslessly convertible back +/// into the same group (modulo spans), except for possibly `TokenTree::Group`s +/// with `Delimiter::None` delimiters. +impl fmt::Display for Group { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let (left, right) = match self.delimiter { + Delimiter::Parenthesis => ("(", ")"), + Delimiter::Brace => ("{", "}"), + Delimiter::Bracket => ("[", "]"), + Delimiter::None => ("", ""), + }; + + f.write_str(left)?; + self.stream.fmt(f)?; + f.write_str(right)?; + + Ok(()) + } +} + +impl fmt::Debug for Group { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let mut debug = fmt.debug_struct("Group"); + debug.field("delimiter", &self.delimiter); + debug.field("stream", &self.stream); + #[cfg(procmacro2_semver_exempt)] + debug.field("span", &self.span); + debug.finish() + } +} + +/// A `Punct` is a single punctuation character like `+`, `-` or `#`. +/// +/// Multicharacter operators like `+=` are represented as two instances of +/// `Punct` with different forms of `Spacing` returned. +#[derive(Clone)] +pub struct Punct { + op: char, + spacing: Spacing, + span: Span, +} + +/// Whether a `Punct` is followed immediately by another `Punct` or followed by +/// another token or whitespace. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum Spacing { + /// E.g. `+` is `Alone` in `+ =`, `+ident` or `+()`. + Alone, + /// E.g. `+` is `Joint` in `+=` or `'#`. + /// + /// Additionally, single quote `'` can join with identifiers to form + /// lifetimes `'ident`. + Joint, +} + +impl Punct { + /// Creates a new `Punct` from the given character and spacing. + /// + /// The `ch` argument must be a valid punctuation character permitted by the + /// language, otherwise the function will panic. + /// + /// The returned `Punct` will have the default span of `Span::call_site()` + /// which can be further configured with the `set_span` method below. + pub fn new(op: char, spacing: Spacing) -> Punct { + Punct { + op: op, + spacing: spacing, + span: Span::call_site(), + } + } + + /// Returns the value of this punctuation character as `char`.
+ pub fn as_char(&self) -> char { + self.op + } + + /// Returns the spacing of this punctuation character, indicating whether + /// it's immediately followed by another `Punct` in the token stream, so + /// they can potentially be combined into a multicharacter operator + /// (`Joint`), or it's followed by some other token or whitespace (`Alone`) + /// so the operator has certainly ended. + pub fn spacing(&self) -> Spacing { + self.spacing + } + + /// Returns the span for this punctuation character. + pub fn span(&self) -> Span { + self.span + } + + /// Configure the span for this punctuation character. + pub fn set_span(&mut self, span: Span) { + self.span = span; + } +} + +/// Prints the punctuation character as a string that should be losslessly +/// convertible back into the same character. +impl fmt::Display for Punct { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.op.fmt(f) + } +} + +impl fmt::Debug for Punct { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let mut debug = fmt.debug_struct("Punct"); + debug.field("op", &self.op); + debug.field("spacing", &self.spacing); + #[cfg(procmacro2_semver_exempt)] + debug.field("span", &self.span); + debug.finish() + } +} + +/// A word of Rust code, which may be a keyword or legal variable name. +/// +/// An identifier consists of at least one Unicode code point, the first of +/// which has the XID_Start property and the rest of which have the XID_Continue +/// property. +/// +/// - The empty string is not an identifier. Use `Option<Ident>`. +/// - A lifetime is not an identifier. Use `syn::Lifetime` instead. +/// +/// An identifier constructed with `Ident::new` is permitted to be a Rust +/// keyword, though parsing one through its [`Synom`] implementation rejects +/// Rust keywords. Use `call!(Ident::parse_any)` when parsing to match the +/// behaviour of `Ident::new`. +/// +/// [`Synom`]: https://docs.rs/syn/0.14/syn/synom/trait.Synom.html +/// +/// # Examples +/// +/// A new ident can be created from a string using the `Ident::new` function. +/// A span must be provided explicitly which governs the name resolution +/// behavior of the resulting identifier. +/// +/// ```rust +/// extern crate proc_macro2; +/// +/// use proc_macro2::{Ident, Span}; +/// +/// fn main() { +/// let call_ident = Ident::new("calligraphy", Span::call_site()); +/// +/// println!("{}", call_ident); +/// } +/// ``` +/// +/// An ident can be interpolated into a token stream using the `quote!` macro. +/// +/// ```rust +/// #[macro_use] +/// extern crate quote; +/// +/// extern crate proc_macro2; +/// +/// use proc_macro2::{Ident, Span}; +/// +/// fn main() { +/// let ident = Ident::new("demo", Span::call_site()); +/// +/// // Create a variable binding whose name is this ident. +/// let expanded = quote! { let #ident = 10; }; +/// +/// // Create a variable binding with a slightly different name. +/// let temp_ident = Ident::new(&format!("new_{}", ident), Span::call_site()); +/// let expanded = quote! { let #temp_ident = 10; }; +/// } +/// ``` +/// +/// A string representation of the ident is available through the `to_string()` +/// method. +/// +/// ```rust +/// # extern crate proc_macro2; +/// # +/// # use proc_macro2::{Ident, Span}; +/// # +/// # let ident = Ident::new("another_identifier", Span::call_site()); +/// # +/// // Examine the ident as a string.
+/// let ident_string = ident.to_string(); +/// if ident_string.len() > 60 { +/// println!("Very long identifier: {}", ident_string) +/// } +/// ``` +#[derive(Clone)] +pub struct Ident { + inner: imp::Ident, + _marker: marker::PhantomData<Rc<()>>, +} + +impl Ident { + fn _new(inner: imp::Ident) -> Ident { + Ident { + inner: inner, + _marker: marker::PhantomData, + } + } + + /// Creates a new `Ident` with the given `string` as well as the specified + /// `span`. + /// + /// The `string` argument must be a valid identifier permitted by the + /// language, otherwise the function will panic. + /// + /// Note that `span`, currently in rustc, configures the hygiene information + /// for this identifier. + /// + /// As of this time `Span::call_site()` explicitly opts-in to "call-site" + /// hygiene meaning that identifiers created with this span will be resolved + /// as if they were written directly at the location of the macro call, and + /// other code at the macro call site will be able to refer to them as well. + /// + /// Later spans like `Span::def_site()` will allow opting in to + /// "definition-site" hygiene meaning that identifiers created with this + /// span will be resolved at the location of the macro definition and other + /// code at the macro call site will not be able to refer to them. + /// + /// Due to the current importance of hygiene this constructor, unlike other + /// tokens, requires a `Span` to be specified at construction. + /// + /// # Panics + /// + /// Panics if the input string is neither a keyword nor a legal variable + /// name. + pub fn new(string: &str, span: Span) -> Ident { + Ident::_new(imp::Ident::new(string, span.inner)) + } + + /// Same as `Ident::new`, but creates a raw identifier (`r#ident`). + /// + /// This method is semver exempt and not exposed by default. + #[cfg(procmacro2_semver_exempt)] + pub fn new_raw(string: &str, span: Span) -> Ident { + Ident::_new_raw(string, span) + } + + fn _new_raw(string: &str, span: Span) -> Ident { + Ident::_new(imp::Ident::new_raw(string, span.inner)) + } + + /// Returns the span of this `Ident`. + pub fn span(&self) -> Span { + Span::_new(self.inner.span()) + } + + /// Configures the span of this `Ident`, possibly changing its hygiene + /// context. + pub fn set_span(&mut self, span: Span) { + self.inner.set_span(span.inner); + } +} + +impl PartialEq for Ident { + fn eq(&self, other: &Ident) -> bool { + self.inner == other.inner + } +} + +impl<T> PartialEq<T> for Ident +where + T: ?Sized + AsRef<str>, +{ + fn eq(&self, other: &T) -> bool { + self.inner == other + } +} + +impl Eq for Ident {} + +impl PartialOrd for Ident { + fn partial_cmp(&self, other: &Ident) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +impl Ord for Ident { + fn cmp(&self, other: &Ident) -> Ordering { + self.to_string().cmp(&other.to_string()) + } +} + +impl Hash for Ident { + fn hash<H: Hasher>(&self, hasher: &mut H) { + self.to_string().hash(hasher) + } +} + +/// Prints the identifier as a string that should be losslessly convertible back +/// into the same identifier. +impl fmt::Display for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(f) + } +} + +impl fmt::Debug for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(f) + } +} + +/// A literal string (`"hello"`), byte string (`b"hello"`), character (`'a'`), +/// byte character (`b'a'`), an integer or floating point number with or without +/// a suffix (`1`, `1u8`, `2.3`, `2.3f32`).
+/// +/// Boolean literals like `true` and `false` do not belong here, they are +/// `Ident`s. +#[derive(Clone)] +pub struct Literal { + inner: imp::Literal, + _marker: marker::PhantomData<Rc<()>>, +} + +macro_rules! suffixed_int_literals { + ($($name:ident => $kind:ident,)*) => ($( + /// Creates a new suffixed integer literal with the specified value. + /// + /// This function will create an integer like `1u32` where the integer + /// value specified is the first part of the token and the integral is + /// also suffixed at the end. Literals created from negative numbers may + /// not survive roundtrips through `TokenStream` or strings and may be + /// broken into two tokens (`-` and positive literal). + /// + /// Literals created through this method have the `Span::call_site()` + /// span by default, which can be configured with the `set_span` method + /// below. + pub fn $name(n: $kind) -> Literal { + Literal::_new(imp::Literal::$name(n)) + } + )*) +} + +macro_rules! unsuffixed_int_literals { + ($($name:ident => $kind:ident,)*) => ($( + /// Creates a new unsuffixed integer literal with the specified value. + /// + /// This function will create an integer like `1` where the integer + /// value specified is the first part of the token. No suffix is + /// specified on this token, meaning that invocations like + /// `Literal::i8_unsuffixed(1)` are equivalent to + /// `Literal::u32_unsuffixed(1)`. Literals created from negative numbers + /// may not survive roundtrips through `TokenStream` or strings and may + /// be broken into two tokens (`-` and positive literal). + /// + /// Literals created through this method have the `Span::call_site()` + /// span by default, which can be configured with the `set_span` method + /// below. + pub fn $name(n: $kind) -> Literal { + Literal::_new(imp::Literal::$name(n)) + } + )*) +} + +impl Literal { + fn _new(inner: imp::Literal) -> Literal { + Literal { + inner: inner, + _marker: marker::PhantomData, + } + } + + fn _new_stable(inner: stable::Literal) -> Literal { + Literal { + inner: inner.into(), + _marker: marker::PhantomData, + } + } + + suffixed_int_literals! { + u8_suffixed => u8, + u16_suffixed => u16, + u32_suffixed => u32, + u64_suffixed => u64, + usize_suffixed => usize, + i8_suffixed => i8, + i16_suffixed => i16, + i32_suffixed => i32, + i64_suffixed => i64, + isize_suffixed => isize, + } + + unsuffixed_int_literals! { + u8_unsuffixed => u8, + u16_unsuffixed => u16, + u32_unsuffixed => u32, + u64_unsuffixed => u64, + usize_unsuffixed => usize, + i8_unsuffixed => i8, + i16_unsuffixed => i16, + i32_unsuffixed => i32, + i64_unsuffixed => i64, + isize_unsuffixed => isize, + } + + pub fn f64_unsuffixed(f: f64) -> Literal { + assert!(f.is_finite()); + Literal::_new(imp::Literal::f64_unsuffixed(f)) + } + + pub fn f64_suffixed(f: f64) -> Literal { + assert!(f.is_finite()); + Literal::_new(imp::Literal::f64_suffixed(f)) + } + + /// Creates a new unsuffixed floating-point literal. + /// + /// This constructor is similar to those like `Literal::i8_unsuffixed` where + /// the float's value is emitted directly into the token but no suffix is + /// used, so it may be inferred to be a `f64` later in the compiler. + /// Literals created from negative numbers may not survive roundtrips through + /// `TokenStream` or strings and may be broken into two tokens (`-` and + /// positive literal). + /// + /// # Panics + /// + /// This function requires that the specified float is finite, for example + /// if it is infinity or NaN this function will panic.
+ pub fn f32_unsuffixed(f: f32) -> Literal { + assert!(f.is_finite()); + Literal::_new(imp::Literal::f32_unsuffixed(f)) + } + + pub fn f32_suffixed(f: f32) -> Literal { + assert!(f.is_finite()); + Literal::_new(imp::Literal::f32_suffixed(f)) + } + + pub fn string(string: &str) -> Literal { + Literal::_new(imp::Literal::string(string)) + } + + pub fn character(ch: char) -> Literal { + Literal::_new(imp::Literal::character(ch)) + } + + pub fn byte_string(s: &[u8]) -> Literal { + Literal::_new(imp::Literal::byte_string(s)) + } + + pub fn span(&self) -> Span { + Span::_new(self.inner.span()) + } + + pub fn set_span(&mut self, span: Span) { + self.inner.set_span(span.inner); + } +} + +impl fmt::Debug for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(f) + } +} + +impl fmt::Display for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(f) + } +} + +/// Public implementation details for the `TokenStream` type, such as iterators. +pub mod token_stream { + use std::fmt; + use std::marker; + use std::rc::Rc; + + use imp; + pub use TokenStream; + use TokenTree; + + /// An iterator over `TokenStream`'s `TokenTree`s. + /// + /// The iteration is "shallow", i.e. the iterator doesn't recurse into + /// delimited groups, and returns whole groups as token trees. + pub struct IntoIter { + inner: imp::TokenTreeIter, + _marker: marker::PhantomData<Rc<()>>, + } + + impl Iterator for IntoIter { + type Item = TokenTree; + + fn next(&mut self) -> Option<TokenTree> { + self.inner.next() + } + } + + impl fmt::Debug for IntoIter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(f) + } + } + + impl IntoIterator for TokenStream { + type Item = TokenTree; + type IntoIter = IntoIter; + + fn into_iter(self) -> IntoIter { + IntoIter { + inner: self.inner.into_iter(), + _marker: marker::PhantomData, + } + } + } +} --- /dev/null +++ b/vendor/proc-macro2-0.4.19/src/stable.rs @@ -0,0 +1,1325 @@ +#![cfg_attr(not(procmacro2_semver_exempt), allow(dead_code))] + +#[cfg(procmacro2_semver_exempt)] +use std::cell::RefCell; +#[cfg(procmacro2_semver_exempt)] +use std::cmp; +use std::fmt; +use std::iter; +use std::str::FromStr; +use std::vec; + +use strnom::{block_comment, skip_whitespace, whitespace, word_break, Cursor, PResult}; +use unicode_xid::UnicodeXID; + +use {Delimiter, Group, Punct, Spacing, TokenTree}; + +#[derive(Clone)] +pub struct TokenStream { + inner: Vec<TokenTree>, +} + +#[derive(Debug)] +pub struct LexError; + +impl TokenStream { + pub fn new() -> TokenStream { + TokenStream { inner: Vec::new() } + } + + pub fn is_empty(&self) -> bool { + self.inner.len() == 0 + } +} + +#[cfg(procmacro2_semver_exempt)] +fn get_cursor(src: &str) -> Cursor { + // Create a dummy file & add it to the codemap + CODEMAP.with(|cm| { + let mut cm = cm.borrow_mut(); + let name = format!("<parsed string {}>", cm.files.len()); + let span = cm.add_file(&name, src); + Cursor { + rest: src, + off: span.lo, + } + }) +} + +#[cfg(not(procmacro2_semver_exempt))] +fn get_cursor(src: &str) -> Cursor { + Cursor { rest: src } +} + +impl FromStr for TokenStream { + type Err = LexError; + + fn from_str(src: &str) -> Result<TokenStream, LexError> { + // Create a dummy file & add it to the codemap + let cursor = get_cursor(src); + + match token_stream(cursor) { + Ok((input, output)) => { + if skip_whitespace(input).len() != 0 { + Err(LexError) + } else { + Ok(output) + } + } + Err(LexError) => Err(LexError), + } + } +} + +impl fmt::Display for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut joint =
false; + for (i, tt) in self.inner.iter().enumerate() { + if i != 0 && !joint { + write!(f, " ")?; + } + joint = false; + match *tt { + TokenTree::Group(ref tt) => { + let (start, end) = match tt.delimiter() { + Delimiter::Parenthesis => ("(", ")"), + Delimiter::Brace => ("{", "}"), + Delimiter::Bracket => ("[", "]"), + Delimiter::None => ("", ""), + }; + if tt.stream().into_iter().next().is_none() { + write!(f, "{} {}", start, end)? + } else { + write!(f, "{} {} {}", start, tt.stream(), end)? + } + } + TokenTree::Ident(ref tt) => write!(f, "{}", tt)?, + TokenTree::Punct(ref tt) => { + write!(f, "{}", tt.as_char())?; + match tt.spacing() { + Spacing::Alone => {} + Spacing::Joint => joint = true, + } + } + TokenTree::Literal(ref tt) => write!(f, "{}", tt)?, + } + } + + Ok(()) + } +} + +impl fmt::Debug for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("TokenStream ")?; + f.debug_list().entries(self.clone()).finish() + } +} + +#[cfg(use_proc_macro)] +impl From<::proc_macro::TokenStream> for TokenStream { + fn from(inner: ::proc_macro::TokenStream) -> TokenStream { + inner + .to_string() + .parse() + .expect("compiler token stream parse failed") + } +} + +#[cfg(use_proc_macro)] +impl From<TokenStream> for ::proc_macro::TokenStream { + fn from(inner: TokenStream) -> ::proc_macro::TokenStream { + inner + .to_string() + .parse() + .expect("failed to parse to compiler tokens") + } +} + +impl From<TokenTree> for TokenStream { + fn from(tree: TokenTree) -> TokenStream { + TokenStream { inner: vec![tree] } + } +} + +impl iter::FromIterator<TokenTree> for TokenStream { + fn from_iter<I: IntoIterator<Item = TokenTree>>(streams: I) -> Self { + let mut v = Vec::new(); + + for token in streams.into_iter() { + v.push(token); + } + + TokenStream { inner: v } + } +} + +impl iter::FromIterator<TokenStream> for TokenStream { + fn from_iter<I: IntoIterator<Item = TokenStream>>(streams: I) -> Self { + let mut v = Vec::new(); + + for stream in streams.into_iter() { + v.extend(stream.inner); + } + + TokenStream { inner: v } + } +} + +impl Extend<TokenTree> for TokenStream { + fn extend<I: IntoIterator<Item = TokenTree>>(&mut self, streams: I) { + self.inner.extend(streams); + } +} + +impl Extend<TokenStream> for TokenStream { + fn extend<I: IntoIterator<Item = TokenStream>>(&mut self, streams: I) { + self.inner + .extend(streams.into_iter().flat_map(|stream| stream)); + } +} + +pub type TokenTreeIter = vec::IntoIter<TokenTree>; + +impl IntoIterator for TokenStream { + type Item = TokenTree; + type IntoIter = TokenTreeIter; + + fn into_iter(self) -> TokenTreeIter { + self.inner.into_iter() + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct FileName(String); + +#[allow(dead_code)] +pub fn file_name(s: String) -> FileName { + FileName(s) +} + +impl fmt::Display for FileName { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +#[derive(Clone, PartialEq, Eq)] +pub struct SourceFile { + name: FileName, +} + +impl SourceFile { + /// Get the path to this source file as a string. + pub fn path(&self) -> &FileName { + &self.name + } + + pub fn is_real(&self) -> bool { + // XXX(nika): Support real files in the future? + false + } +} + +impl AsRef<FileName> for SourceFile { + fn as_ref(&self) -> &FileName { + self.path() + } +} + +impl fmt::Debug for SourceFile { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SourceFile") + .field("path", &self.path()) + .field("is_real", &self.is_real()) + .finish() + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct LineColumn { + pub line: usize, + pub column: usize, +} + +#[cfg(procmacro2_semver_exempt)] +thread_local!
{
+    static CODEMAP: RefCell<Codemap> = RefCell::new(Codemap {
+        // NOTE: We start with a single dummy file which all call_site() and
+        // def_site() spans reference.
+        files: vec![FileInfo {
+            name: "<unspecified>".to_owned(),
+            span: Span { lo: 0, hi: 0 },
+            lines: vec![0],
+        }],
+    });
+}
+
+#[cfg(procmacro2_semver_exempt)]
+struct FileInfo {
+    name: String,
+    span: Span,
+    lines: Vec<usize>,
+}
+
+#[cfg(procmacro2_semver_exempt)]
+impl FileInfo {
+    fn offset_line_column(&self, offset: usize) -> LineColumn {
+        assert!(self.span_within(Span {
+            lo: offset as u32,
+            hi: offset as u32
+        }));
+        let offset = offset - self.span.lo as usize;
+        match self.lines.binary_search(&offset) {
+            Ok(found) => LineColumn {
+                line: found + 1,
+                column: 0,
+            },
+            Err(idx) => LineColumn {
+                line: idx,
+                column: offset - self.lines[idx - 1],
+            },
+        }
+    }
+
+    fn span_within(&self, span: Span) -> bool {
+        span.lo >= self.span.lo && span.hi <= self.span.hi
+    }
+}
+
+/// Computes the offsets of each line in the given source string.
+#[cfg(procmacro2_semver_exempt)]
+fn lines_offsets(s: &str) -> Vec<usize> {
+    let mut lines = vec![0];
+    let mut prev = 0;
+    while let Some(len) = s[prev..].find('\n') {
+        prev += len + 1;
+        lines.push(prev);
+    }
+    lines
+}
+
+#[cfg(procmacro2_semver_exempt)]
+struct Codemap {
+    files: Vec<FileInfo>,
+}
+
+#[cfg(procmacro2_semver_exempt)]
+impl Codemap {
+    fn next_start_pos(&self) -> u32 {
+        // Add 1 so there's always space between files.
+        //
+        // We'll always have at least 1 file, as we initialize our files list
+        // with a dummy file.
+        self.files.last().unwrap().span.hi + 1
+    }
+
+    fn add_file(&mut self, name: &str, src: &str) -> Span {
+        let lines = lines_offsets(src);
+        let lo = self.next_start_pos();
+        // XXX(nika): Should we bother doing a checked cast or checked add here?
+        let span = Span {
+            lo: lo,
+            hi: lo + (src.len() as u32),
+        };
+
+        self.files.push(FileInfo {
+            name: name.to_owned(),
+            span: span,
+            lines: lines,
+        });
+
+        span
+    }
+
+    fn fileinfo(&self, span: Span) -> &FileInfo {
+        for file in &self.files {
+            if file.span_within(span) {
+                return file;
+            }
+        }
+        panic!("Invalid span with no related FileInfo!");
+    }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub struct Span {
+    #[cfg(procmacro2_semver_exempt)]
+    lo: u32,
+    #[cfg(procmacro2_semver_exempt)]
+    hi: u32,
+}
+
+impl Span {
+    #[cfg(not(procmacro2_semver_exempt))]
+    pub fn call_site() -> Span {
+        Span {}
+    }
+
+    #[cfg(procmacro2_semver_exempt)]
+    pub fn call_site() -> Span {
+        Span { lo: 0, hi: 0 }
+    }
+
+    pub fn def_site() -> Span {
+        Span::call_site()
+    }
+
+    pub fn resolved_at(&self, _other: Span) -> Span {
+        // Stable spans consist only of line/column information, so
+        // `resolved_at` and `located_at` only select which span the
+        // caller wants line/column information from.
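+        // Returning `*self` keeps this span's own location; `located_at`
+        // below is the mirror image and returns `other`.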
+ *self + } + + pub fn located_at(&self, other: Span) -> Span { + other + } + + #[cfg(procmacro2_semver_exempt)] + pub fn source_file(&self) -> SourceFile { + CODEMAP.with(|cm| { + let cm = cm.borrow(); + let fi = cm.fileinfo(*self); + SourceFile { + name: FileName(fi.name.clone()), + } + }) + } + + #[cfg(procmacro2_semver_exempt)] + pub fn start(&self) -> LineColumn { + CODEMAP.with(|cm| { + let cm = cm.borrow(); + let fi = cm.fileinfo(*self); + fi.offset_line_column(self.lo as usize) + }) + } + + #[cfg(procmacro2_semver_exempt)] + pub fn end(&self) -> LineColumn { + CODEMAP.with(|cm| { + let cm = cm.borrow(); + let fi = cm.fileinfo(*self); + fi.offset_line_column(self.hi as usize) + }) + } + + #[cfg(procmacro2_semver_exempt)] + pub fn join(&self, other: Span) -> Option { + CODEMAP.with(|cm| { + let cm = cm.borrow(); + // If `other` is not within the same FileInfo as us, return None. + if !cm.fileinfo(*self).span_within(other) { + return None; + } + Some(Span { + lo: cmp::min(self.lo, other.lo), + hi: cmp::max(self.hi, other.hi), + }) + }) + } +} + +impl fmt::Debug for Span { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + #[cfg(procmacro2_semver_exempt)] + return write!(f, "bytes({}..{})", self.lo, self.hi); + + #[cfg(not(procmacro2_semver_exempt))] + write!(f, "Span") + } +} + +#[derive(Clone)] +pub struct Ident { + sym: String, + span: Span, + raw: bool, +} + +impl Ident { + fn _new(string: &str, raw: bool, span: Span) -> Ident { + validate_term(string); + + Ident { + sym: string.to_owned(), + span: span, + raw: raw, + } + } + + pub fn new(string: &str, span: Span) -> Ident { + Ident::_new(string, false, span) + } + + pub fn new_raw(string: &str, span: Span) -> Ident { + Ident::_new(string, true, span) + } + + pub fn span(&self) -> Span { + self.span + } + + pub fn set_span(&mut self, span: Span) { + self.span = span; + } +} + +#[inline] +fn is_ident_start(c: char) -> bool { + ('a' <= c && c <= 'z') + || ('A' <= c && c <= 'Z') + || c == '_' + || (c > '\x7f' && UnicodeXID::is_xid_start(c)) +} + +#[inline] +fn is_ident_continue(c: char) -> bool { + ('a' <= c && c <= 'z') + || ('A' <= c && c <= 'Z') + || c == '_' + || ('0' <= c && c <= '9') + || (c > '\x7f' && UnicodeXID::is_xid_continue(c)) +} + +fn validate_term(string: &str) { + let validate = string; + if validate.is_empty() { + panic!("Ident is not allowed to be empty; use Option"); + } + + if validate.bytes().all(|digit| digit >= b'0' && digit <= b'9') { + panic!("Ident cannot be a number; use Literal instead"); + } + + fn ident_ok(string: &str) -> bool { + let mut chars = string.chars(); + let first = chars.next().unwrap(); + if !is_ident_start(first) { + return false; + } + for ch in chars { + if !is_ident_continue(ch) { + return false; + } + } + true + } + + if !ident_ok(validate) { + panic!("{:?} is not a valid Ident", string); + } +} + +impl PartialEq for Ident { + fn eq(&self, other: &Ident) -> bool { + self.sym == other.sym && self.raw == other.raw + } +} + +impl PartialEq for Ident +where + T: ?Sized + AsRef, +{ + fn eq(&self, other: &T) -> bool { + let other = other.as_ref(); + if self.raw { + other.starts_with("r#") && self.sym == other[2..] 
+ } else { + self.sym == other + } + } +} + +impl fmt::Display for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if self.raw { + "r#".fmt(f)?; + } + self.sym.fmt(f) + } +} + +impl fmt::Debug for Ident { + // Ident(proc_macro), Ident(r#union) + #[cfg(not(procmacro2_semver_exempt))] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut debug = f.debug_tuple("Ident"); + debug.field(&format_args!("{}", self)); + debug.finish() + } + + // Ident { + // sym: proc_macro, + // span: bytes(128..138) + // } + #[cfg(procmacro2_semver_exempt)] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut debug = f.debug_struct("Ident"); + debug.field("sym", &format_args!("{}", self)); + debug.field("span", &self.span); + debug.finish() + } +} + +#[derive(Clone)] +pub struct Literal { + text: String, + span: Span, +} + +macro_rules! suffixed_numbers { + ($($name:ident => $kind:ident,)*) => ($( + pub fn $name(n: $kind) -> Literal { + Literal::_new(format!(concat!("{}", stringify!($kind)), n)) + } + )*) +} + +macro_rules! unsuffixed_numbers { + ($($name:ident => $kind:ident,)*) => ($( + pub fn $name(n: $kind) -> Literal { + Literal::_new(n.to_string()) + } + )*) +} + +impl Literal { + fn _new(text: String) -> Literal { + Literal { + text: text, + span: Span::call_site(), + } + } + + suffixed_numbers! { + u8_suffixed => u8, + u16_suffixed => u16, + u32_suffixed => u32, + u64_suffixed => u64, + usize_suffixed => usize, + i8_suffixed => i8, + i16_suffixed => i16, + i32_suffixed => i32, + i64_suffixed => i64, + isize_suffixed => isize, + + f32_suffixed => f32, + f64_suffixed => f64, + } + + unsuffixed_numbers! { + u8_unsuffixed => u8, + u16_unsuffixed => u16, + u32_unsuffixed => u32, + u64_unsuffixed => u64, + usize_unsuffixed => usize, + i8_unsuffixed => i8, + i16_unsuffixed => i16, + i32_unsuffixed => i32, + i64_unsuffixed => i64, + isize_unsuffixed => isize, + } + + pub fn f32_unsuffixed(f: f32) -> Literal { + let mut s = f.to_string(); + if !s.contains(".") { + s.push_str(".0"); + } + Literal::_new(s) + } + + pub fn f64_unsuffixed(f: f64) -> Literal { + let mut s = f.to_string(); + if !s.contains(".") { + s.push_str(".0"); + } + Literal::_new(s) + } + + pub fn string(t: &str) -> Literal { + let mut s = t + .chars() + .flat_map(|c| c.escape_default()) + .collect::(); + s.push('"'); + s.insert(0, '"'); + Literal::_new(s) + } + + pub fn character(t: char) -> Literal { + Literal::_new(format!("'{}'", t.escape_default().collect::())) + } + + pub fn byte_string(bytes: &[u8]) -> Literal { + let mut escaped = "b\"".to_string(); + for b in bytes { + match *b { + b'\0' => escaped.push_str(r"\0"), + b'\t' => escaped.push_str(r"\t"), + b'\n' => escaped.push_str(r"\n"), + b'\r' => escaped.push_str(r"\r"), + b'"' => escaped.push_str("\\\""), + b'\\' => escaped.push_str("\\\\"), + b'\x20'...b'\x7E' => escaped.push(*b as char), + _ => escaped.push_str(&format!("\\x{:02X}", b)), + } + } + escaped.push('"'); + Literal::_new(escaped) + } + + pub fn span(&self) -> Span { + self.span + } + + pub fn set_span(&mut self, span: Span) { + self.span = span; + } +} + +impl fmt::Display for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.text.fmt(f) + } +} + +impl fmt::Debug for Literal { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let mut debug = fmt.debug_struct("Literal"); + debug.field("lit", &format_args!("{}", self.text)); + #[cfg(procmacro2_semver_exempt)] + debug.field("span", &self.span); + debug.finish() + } +} + +fn token_stream(mut input: 
Cursor) -> PResult { + let mut trees = Vec::new(); + loop { + let input_no_ws = skip_whitespace(input); + if input_no_ws.rest.len() == 0 { + break; + } + if let Ok((a, tokens)) = doc_comment(input_no_ws) { + input = a; + trees.extend(tokens); + continue; + } + + let (a, tt) = match token_tree(input_no_ws) { + Ok(p) => p, + Err(_) => break, + }; + trees.push(tt); + input = a; + } + Ok((input, TokenStream { inner: trees })) +} + +#[cfg(not(procmacro2_semver_exempt))] +fn spanned<'a, T>( + input: Cursor<'a>, + f: fn(Cursor<'a>) -> PResult<'a, T>, +) -> PResult<'a, (T, ::Span)> { + let (a, b) = f(skip_whitespace(input))?; + Ok((a, ((b, ::Span::_new_stable(Span {}))))) +} + +#[cfg(procmacro2_semver_exempt)] +fn spanned<'a, T>( + input: Cursor<'a>, + f: fn(Cursor<'a>) -> PResult<'a, T>, +) -> PResult<'a, (T, ::Span)> { + let input = skip_whitespace(input); + let lo = input.off; + let (a, b) = f(input)?; + let hi = a.off; + let span = ::Span::_new_stable(Span { lo: lo, hi: hi }); + Ok((a, (b, span))) +} + +fn token_tree(input: Cursor) -> PResult { + let (rest, (mut tt, span)) = spanned(input, token_kind)?; + tt.set_span(span); + Ok((rest, tt)) +} + +named!(token_kind -> TokenTree, alt!( + map!(group, TokenTree::Group) + | + map!(literal, |l| TokenTree::Literal(::Literal::_new_stable(l))) // must be before symbol + | + map!(op, TokenTree::Punct) + | + symbol_leading_ws +)); + +named!(group -> Group, alt!( + delimited!( + punct!("("), + token_stream, + punct!(")") + ) => { |ts| Group::new(Delimiter::Parenthesis, ::TokenStream::_new_stable(ts)) } + | + delimited!( + punct!("["), + token_stream, + punct!("]") + ) => { |ts| Group::new(Delimiter::Bracket, ::TokenStream::_new_stable(ts)) } + | + delimited!( + punct!("{"), + token_stream, + punct!("}") + ) => { |ts| Group::new(Delimiter::Brace, ::TokenStream::_new_stable(ts)) } +)); + +fn symbol_leading_ws(input: Cursor) -> PResult { + symbol(skip_whitespace(input)) +} + +fn symbol(input: Cursor) -> PResult { + let mut chars = input.char_indices(); + + let raw = input.starts_with("r#"); + if raw { + chars.next(); + chars.next(); + } + + match chars.next() { + Some((_, ch)) if is_ident_start(ch) => {} + _ => return Err(LexError), + } + + let mut end = input.len(); + for (i, ch) in chars { + if !is_ident_continue(ch) { + end = i; + break; + } + } + + let a = &input.rest[..end]; + if a == "r#_" { + Err(LexError) + } else { + let ident = if raw { + ::Ident::_new_raw(&a[2..], ::Span::call_site()) + } else { + ::Ident::new(a, ::Span::call_site()) + }; + Ok((input.advance(end), ident.into())) + } +} + +fn literal(input: Cursor) -> PResult { + let input_no_ws = skip_whitespace(input); + + match literal_nocapture(input_no_ws) { + Ok((a, ())) => { + let start = input.len() - input_no_ws.len(); + let len = input_no_ws.len() - a.len(); + let end = start + len; + Ok((a, Literal::_new(input.rest[start..end].to_string()))) + } + Err(LexError) => Err(LexError), + } +} + +named!(literal_nocapture -> (), alt!( + string + | + byte_string + | + byte + | + character + | + float + | + int +)); + +named!(string -> (), alt!( + quoted_string + | + preceded!( + punct!("r"), + raw_string + ) => { |_| () } +)); + +named!(quoted_string -> (), delimited!( + punct!("\""), + cooked_string, + tag!("\"") +)); + +fn cooked_string(input: Cursor) -> PResult<()> { + let mut chars = input.char_indices().peekable(); + while let Some((byte_offset, ch)) = chars.next() { + match ch { + '"' => { + return Ok((input.advance(byte_offset), ())); + } + '\r' => { + if let Some((_, '\n')) = chars.next() 
{ + // ... + } else { + break; + } + } + '\\' => match chars.next() { + Some((_, 'x')) => { + if !backslash_x_char(&mut chars) { + break; + } + } + Some((_, 'n')) | Some((_, 'r')) | Some((_, 't')) | Some((_, '\\')) + | Some((_, '\'')) | Some((_, '"')) | Some((_, '0')) => {} + Some((_, 'u')) => { + if !backslash_u(&mut chars) { + break; + } + } + Some((_, '\n')) | Some((_, '\r')) => { + while let Some(&(_, ch)) = chars.peek() { + if ch.is_whitespace() { + chars.next(); + } else { + break; + } + } + } + _ => break, + }, + _ch => {} + } + } + Err(LexError) +} + +named!(byte_string -> (), alt!( + delimited!( + punct!("b\""), + cooked_byte_string, + tag!("\"") + ) => { |_| () } + | + preceded!( + punct!("br"), + raw_string + ) => { |_| () } +)); + +fn cooked_byte_string(mut input: Cursor) -> PResult<()> { + let mut bytes = input.bytes().enumerate(); + 'outer: while let Some((offset, b)) = bytes.next() { + match b { + b'"' => { + return Ok((input.advance(offset), ())); + } + b'\r' => { + if let Some((_, b'\n')) = bytes.next() { + // ... + } else { + break; + } + } + b'\\' => match bytes.next() { + Some((_, b'x')) => { + if !backslash_x_byte(&mut bytes) { + break; + } + } + Some((_, b'n')) | Some((_, b'r')) | Some((_, b't')) | Some((_, b'\\')) + | Some((_, b'0')) | Some((_, b'\'')) | Some((_, b'"')) => {} + Some((newline, b'\n')) | Some((newline, b'\r')) => { + let rest = input.advance(newline + 1); + for (offset, ch) in rest.char_indices() { + if !ch.is_whitespace() { + input = rest.advance(offset); + bytes = input.bytes().enumerate(); + continue 'outer; + } + } + break; + } + _ => break, + }, + b if b < 0x80 => {} + _ => break, + } + } + Err(LexError) +} + +fn raw_string(input: Cursor) -> PResult<()> { + let mut chars = input.char_indices(); + let mut n = 0; + while let Some((byte_offset, ch)) = chars.next() { + match ch { + '"' => { + n = byte_offset; + break; + } + '#' => {} + _ => return Err(LexError), + } + } + for (byte_offset, ch) in chars { + match ch { + '"' if input.advance(byte_offset + 1).starts_with(&input.rest[..n]) => { + let rest = input.advance(byte_offset + 1 + n); + return Ok((rest, ())); + } + '\r' => {} + _ => {} + } + } + Err(LexError) +} + +named!(byte -> (), do_parse!( + punct!("b") >> + tag!("'") >> + cooked_byte >> + tag!("'") >> + (()) +)); + +fn cooked_byte(input: Cursor) -> PResult<()> { + let mut bytes = input.bytes().enumerate(); + let ok = match bytes.next().map(|(_, b)| b) { + Some(b'\\') => match bytes.next().map(|(_, b)| b) { + Some(b'x') => backslash_x_byte(&mut bytes), + Some(b'n') | Some(b'r') | Some(b't') | Some(b'\\') | Some(b'0') | Some(b'\'') + | Some(b'"') => true, + _ => false, + }, + b => b.is_some(), + }; + if ok { + match bytes.next() { + Some((offset, _)) => { + if input.chars().as_str().is_char_boundary(offset) { + Ok((input.advance(offset), ())) + } else { + Err(LexError) + } + } + None => Ok((input.advance(input.len()), ())), + } + } else { + Err(LexError) + } +} + +named!(character -> (), do_parse!( + punct!("'") >> + cooked_char >> + tag!("'") >> + (()) +)); + +fn cooked_char(input: Cursor) -> PResult<()> { + let mut chars = input.char_indices(); + let ok = match chars.next().map(|(_, ch)| ch) { + Some('\\') => match chars.next().map(|(_, ch)| ch) { + Some('x') => backslash_x_char(&mut chars), + Some('u') => backslash_u(&mut chars), + Some('n') | Some('r') | Some('t') | Some('\\') | Some('0') | Some('\'') | Some('"') => { + true + } + _ => false, + }, + ch => ch.is_some(), + }; + if ok { + match chars.next() { + Some((idx, _)) => 
Ok((input.advance(idx), ())), + None => Ok((input.advance(input.len()), ())), + } + } else { + Err(LexError) + } +} + +macro_rules! next_ch { + ($chars:ident @ $pat:pat $(| $rest:pat)*) => { + match $chars.next() { + Some((_, ch)) => match ch { + $pat $(| $rest)* => ch, + _ => return false, + }, + None => return false + } + }; +} + +fn backslash_x_char(chars: &mut I) -> bool +where + I: Iterator, +{ + next_ch!(chars @ '0'...'7'); + next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F'); + true +} + +fn backslash_x_byte(chars: &mut I) -> bool +where + I: Iterator, +{ + next_ch!(chars @ b'0'...b'9' | b'a'...b'f' | b'A'...b'F'); + next_ch!(chars @ b'0'...b'9' | b'a'...b'f' | b'A'...b'F'); + true +} + +fn backslash_u(chars: &mut I) -> bool +where + I: Iterator, +{ + next_ch!(chars @ '{'); + next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F'); + loop { + let c = next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F' | '_' | '}'); + if c == '}' { + return true; + } + } +} + +fn float(input: Cursor) -> PResult<()> { + let (rest, ()) = float_digits(input)?; + for suffix in &["f32", "f64"] { + if rest.starts_with(suffix) { + return word_break(rest.advance(suffix.len())); + } + } + word_break(rest) +} + +fn float_digits(input: Cursor) -> PResult<()> { + let mut chars = input.chars().peekable(); + match chars.next() { + Some(ch) if ch >= '0' && ch <= '9' => {} + _ => return Err(LexError), + } + + let mut len = 1; + let mut has_dot = false; + let mut has_exp = false; + while let Some(&ch) = chars.peek() { + match ch { + '0'...'9' | '_' => { + chars.next(); + len += 1; + } + '.' => { + if has_dot { + break; + } + chars.next(); + if chars + .peek() + .map(|&ch| ch == '.' || UnicodeXID::is_xid_start(ch)) + .unwrap_or(false) + { + return Err(LexError); + } + len += 1; + has_dot = true; + } + 'e' | 'E' => { + chars.next(); + len += 1; + has_exp = true; + break; + } + _ => break, + } + } + + let rest = input.advance(len); + if !(has_dot || has_exp || rest.starts_with("f32") || rest.starts_with("f64")) { + return Err(LexError); + } + + if has_exp { + let mut has_exp_value = false; + while let Some(&ch) = chars.peek() { + match ch { + '+' | '-' => { + if has_exp_value { + break; + } + chars.next(); + len += 1; + } + '0'...'9' => { + chars.next(); + len += 1; + has_exp_value = true; + } + '_' => { + chars.next(); + len += 1; + } + _ => break, + } + } + if !has_exp_value { + return Err(LexError); + } + } + + Ok((input.advance(len), ())) +} + +fn int(input: Cursor) -> PResult<()> { + let (rest, ()) = digits(input)?; + for suffix in &[ + "isize", "i8", "i16", "i32", "i64", "i128", "usize", "u8", "u16", "u32", "u64", "u128", + ] { + if rest.starts_with(suffix) { + return word_break(rest.advance(suffix.len())); + } + } + word_break(rest) +} + +fn digits(mut input: Cursor) -> PResult<()> { + let base = if input.starts_with("0x") { + input = input.advance(2); + 16 + } else if input.starts_with("0o") { + input = input.advance(2); + 8 + } else if input.starts_with("0b") { + input = input.advance(2); + 2 + } else { + 10 + }; + + let mut len = 0; + let mut empty = true; + for b in input.bytes() { + let digit = match b { + b'0'...b'9' => (b - b'0') as u64, + b'a'...b'f' => 10 + (b - b'a') as u64, + b'A'...b'F' => 10 + (b - b'A') as u64, + b'_' => { + if empty && base == 10 { + return Err(LexError); + } + len += 1; + continue; + } + _ => break, + }; + if digit >= base { + return Err(LexError); + } + len += 1; + empty = false; + } + if empty { + Err(LexError) + } else { + Ok((input.advance(len), ())) + } +} + +fn op(input: Cursor) 
-> PResult { + let input = skip_whitespace(input); + match op_char(input) { + Ok((rest, '\'')) => { + symbol(rest)?; + Ok((rest, Punct::new('\'', Spacing::Joint))) + } + Ok((rest, ch)) => { + let kind = match op_char(rest) { + Ok(_) => Spacing::Joint, + Err(LexError) => Spacing::Alone, + }; + Ok((rest, Punct::new(ch, kind))) + } + Err(LexError) => Err(LexError), + } +} + +fn op_char(input: Cursor) -> PResult { + if input.starts_with("//") || input.starts_with("/*") { + // Do not accept `/` of a comment as an op. + return Err(LexError); + } + + let mut chars = input.chars(); + let first = match chars.next() { + Some(ch) => ch, + None => { + return Err(LexError); + } + }; + let recognized = "~!@#$%^&*-=+|;:,<.>/?'"; + if recognized.contains(first) { + Ok((input.advance(first.len_utf8()), first)) + } else { + Err(LexError) + } +} + +fn doc_comment(input: Cursor) -> PResult> { + let mut trees = Vec::new(); + let (rest, ((comment, inner), span)) = spanned(input, doc_comment_contents)?; + trees.push(TokenTree::Punct(Punct::new('#', Spacing::Alone))); + if inner { + trees.push(Punct::new('!', Spacing::Alone).into()); + } + let mut stream = vec![ + TokenTree::Ident(::Ident::new("doc", span)), + TokenTree::Punct(Punct::new('=', Spacing::Alone)), + TokenTree::Literal(::Literal::string(comment)), + ]; + for tt in stream.iter_mut() { + tt.set_span(span); + } + trees.push(Group::new(Delimiter::Bracket, stream.into_iter().collect()).into()); + for tt in trees.iter_mut() { + tt.set_span(span); + } + Ok((rest, trees)) +} + +named!(doc_comment_contents -> (&str, bool), alt!( + do_parse!( + punct!("//!") >> + s: take_until_newline_or_eof!() >> + ((s, true)) + ) + | + do_parse!( + option!(whitespace) >> + peek!(tag!("/*!")) >> + s: block_comment >> + ((s, true)) + ) + | + do_parse!( + punct!("///") >> + not!(tag!("/")) >> + s: take_until_newline_or_eof!() >> + ((s, false)) + ) + | + do_parse!( + option!(whitespace) >> + peek!(tuple!(tag!("/**"), not!(tag!("*")))) >> + s: block_comment >> + ((s, false)) + ) +)); --- /dev/null +++ b/vendor/proc-macro2-0.4.19/src/strnom.rs @@ -0,0 +1,393 @@ +//! Adapted from [`nom`](https://github.com/Geal/nom). 
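+//!
+//! Every parser in this file follows the same convention: it takes a
+//! `Cursor` into the remaining input and returns a `PResult`, i.e. either
+//! the advanced cursor paired with the parsed value, or `LexError`. A
+//! minimal illustrative sketch of that convention (a hypothetical parser,
+//! not part of this crate):
+//!
+//! ```ignore
+//! // Recognize a single ASCII digit, advancing past it on success.
+//! fn digit(input: Cursor) -> PResult<char> {
+//!     match input.chars().next() {
+//!         Some(ch) if ch >= '0' && ch <= '9' => {
+//!             Ok((input.advance(ch.len_utf8()), ch))
+//!         }
+//!         _ => Err(LexError),
+//!     }
+//! }
+//! ```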
+ +use std::str::{Bytes, CharIndices, Chars}; + +use unicode_xid::UnicodeXID; + +use stable::LexError; + +#[derive(Copy, Clone, Eq, PartialEq)] +pub struct Cursor<'a> { + pub rest: &'a str, + #[cfg(procmacro2_semver_exempt)] + pub off: u32, +} + +impl<'a> Cursor<'a> { + #[cfg(not(procmacro2_semver_exempt))] + pub fn advance(&self, amt: usize) -> Cursor<'a> { + Cursor { + rest: &self.rest[amt..], + } + } + #[cfg(procmacro2_semver_exempt)] + pub fn advance(&self, amt: usize) -> Cursor<'a> { + Cursor { + rest: &self.rest[amt..], + off: self.off + (amt as u32), + } + } + + pub fn find(&self, p: char) -> Option { + self.rest.find(p) + } + + pub fn starts_with(&self, s: &str) -> bool { + self.rest.starts_with(s) + } + + pub fn is_empty(&self) -> bool { + self.rest.is_empty() + } + + pub fn len(&self) -> usize { + self.rest.len() + } + + pub fn as_bytes(&self) -> &'a [u8] { + self.rest.as_bytes() + } + + pub fn bytes(&self) -> Bytes<'a> { + self.rest.bytes() + } + + pub fn chars(&self) -> Chars<'a> { + self.rest.chars() + } + + pub fn char_indices(&self) -> CharIndices<'a> { + self.rest.char_indices() + } +} + +pub type PResult<'a, O> = Result<(Cursor<'a>, O), LexError>; + +pub fn whitespace(input: Cursor) -> PResult<()> { + if input.is_empty() { + return Err(LexError); + } + + let bytes = input.as_bytes(); + let mut i = 0; + while i < bytes.len() { + let s = input.advance(i); + if bytes[i] == b'/' { + if s.starts_with("//") + && (!s.starts_with("///") || s.starts_with("////")) + && !s.starts_with("//!") + { + if let Some(len) = s.find('\n') { + i += len + 1; + continue; + } + break; + } else if s.starts_with("/**/") { + i += 4; + continue; + } else if s.starts_with("/*") + && (!s.starts_with("/**") || s.starts_with("/***")) + && !s.starts_with("/*!") + { + let (_, com) = block_comment(s)?; + i += com.len(); + continue; + } + } + match bytes[i] { + b' ' | 0x09...0x0d => { + i += 1; + continue; + } + b if b <= 0x7f => {} + _ => { + let ch = s.chars().next().unwrap(); + if is_whitespace(ch) { + i += ch.len_utf8(); + continue; + } + } + } + return if i > 0 { Ok((s, ())) } else { Err(LexError) }; + } + Ok((input.advance(input.len()), ())) +} + +pub fn block_comment(input: Cursor) -> PResult<&str> { + if !input.starts_with("/*") { + return Err(LexError); + } + + let mut depth = 0; + let bytes = input.as_bytes(); + let mut i = 0; + let upper = bytes.len() - 1; + while i < upper { + if bytes[i] == b'/' && bytes[i + 1] == b'*' { + depth += 1; + i += 1; // eat '*' + } else if bytes[i] == b'*' && bytes[i + 1] == b'/' { + depth -= 1; + if depth == 0 { + return Ok((input.advance(i + 2), &input.rest[..i + 2])); + } + i += 1; // eat '/' + } + i += 1; + } + Err(LexError) +} + +pub fn skip_whitespace(input: Cursor) -> Cursor { + match whitespace(input) { + Ok((rest, _)) => rest, + Err(LexError) => input, + } +} + +fn is_whitespace(ch: char) -> bool { + // Rust treats left-to-right mark and right-to-left mark as whitespace + ch.is_whitespace() || ch == '\u{200e}' || ch == '\u{200f}' +} + +pub fn word_break(input: Cursor) -> PResult<()> { + match input.chars().next() { + Some(ch) if UnicodeXID::is_xid_continue(ch) => Err(LexError), + Some(_) | None => Ok((input, ())), + } +} + +macro_rules! named { + ($name:ident -> $o:ty, $submac:ident!( $($args:tt)* )) => { + fn $name<'a>(i: Cursor<'a>) -> $crate::strnom::PResult<'a, $o> { + $submac!(i, $($args)*) + } + }; +} + +macro_rules! 
alt { + ($i:expr, $e:ident | $($rest:tt)*) => { + alt!($i, call!($e) | $($rest)*) + }; + + ($i:expr, $subrule:ident!( $($args:tt)*) | $($rest:tt)*) => { + match $subrule!($i, $($args)*) { + res @ Ok(_) => res, + _ => alt!($i, $($rest)*) + } + }; + + ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr } | $($rest:tt)+) => { + match $subrule!($i, $($args)*) { + Ok((i, o)) => Ok((i, $gen(o))), + Err(LexError) => alt!($i, $($rest)*) + } + }; + + ($i:expr, $e:ident => { $gen:expr } | $($rest:tt)*) => { + alt!($i, call!($e) => { $gen } | $($rest)*) + }; + + ($i:expr, $e:ident => { $gen:expr }) => { + alt!($i, call!($e) => { $gen }) + }; + + ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr }) => { + match $subrule!($i, $($args)*) { + Ok((i, o)) => Ok((i, $gen(o))), + Err(LexError) => Err(LexError), + } + }; + + ($i:expr, $e:ident) => { + alt!($i, call!($e)) + }; + + ($i:expr, $subrule:ident!( $($args:tt)*)) => { + $subrule!($i, $($args)*) + }; +} + +macro_rules! do_parse { + ($i:expr, ( $($rest:expr),* )) => { + Ok(($i, ( $($rest),* ))) + }; + + ($i:expr, $e:ident >> $($rest:tt)*) => { + do_parse!($i, call!($e) >> $($rest)*) + }; + + ($i:expr, $submac:ident!( $($args:tt)* ) >> $($rest:tt)*) => { + match $submac!($i, $($args)*) { + Err(LexError) => Err(LexError), + Ok((i, _)) => do_parse!(i, $($rest)*), + } + }; + + ($i:expr, $field:ident : $e:ident >> $($rest:tt)*) => { + do_parse!($i, $field: call!($e) >> $($rest)*) + }; + + ($i:expr, $field:ident : $submac:ident!( $($args:tt)* ) >> $($rest:tt)*) => { + match $submac!($i, $($args)*) { + Err(LexError) => Err(LexError), + Ok((i, o)) => { + let $field = o; + do_parse!(i, $($rest)*) + }, + } + }; +} + +macro_rules! peek { + ($i:expr, $submac:ident!( $($args:tt)* )) => { + match $submac!($i, $($args)*) { + Ok((_, o)) => Ok(($i, o)), + Err(LexError) => Err(LexError), + } + }; +} + +macro_rules! call { + ($i:expr, $fun:expr $(, $args:expr)*) => { + $fun($i $(, $args)*) + }; +} + +macro_rules! option { + ($i:expr, $f:expr) => { + match $f($i) { + Ok((i, o)) => Ok((i, Some(o))), + Err(LexError) => Ok(($i, None)), + } + }; +} + +macro_rules! take_until_newline_or_eof { + ($i:expr,) => {{ + if $i.len() == 0 { + Ok(($i, "")) + } else { + match $i.find('\n') { + Some(i) => Ok(($i.advance(i), &$i.rest[..i])), + None => Ok(($i.advance($i.len()), &$i.rest[..$i.len()])), + } + } + }}; +} + +macro_rules! tuple { + ($i:expr, $($rest:tt)*) => { + tuple_parser!($i, (), $($rest)*) + }; +} + +/// Do not use directly. Use `tuple!`. +macro_rules! tuple_parser { + ($i:expr, ($($parsed:tt),*), $e:ident, $($rest:tt)*) => { + tuple_parser!($i, ($($parsed),*), call!($e), $($rest)*) + }; + + ($i:expr, (), $submac:ident!( $($args:tt)* ), $($rest:tt)*) => { + match $submac!($i, $($args)*) { + Err(LexError) => Err(LexError), + Ok((i, o)) => tuple_parser!(i, (o), $($rest)*), + } + }; + + ($i:expr, ($($parsed:tt)*), $submac:ident!( $($args:tt)* ), $($rest:tt)*) => { + match $submac!($i, $($args)*) { + Err(LexError) => Err(LexError), + Ok((i, o)) => tuple_parser!(i, ($($parsed)* , o), $($rest)*), + } + }; + + ($i:expr, ($($parsed:tt),*), $e:ident) => { + tuple_parser!($i, ($($parsed),*), call!($e)) + }; + + ($i:expr, (), $submac:ident!( $($args:tt)* )) => { + $submac!($i, $($args)*) + }; + + ($i:expr, ($($parsed:expr),*), $submac:ident!( $($args:tt)* )) => { + match $submac!($i, $($args)*) { + Err(LexError) => Err(LexError), + Ok((i, o)) => Ok((i, ($($parsed),*, o))) + } + }; + + ($i:expr, ($($parsed:expr),*)) => { + Ok(($i, ($($parsed),*))) + }; +} + +macro_rules! 
not { + ($i:expr, $submac:ident!( $($args:tt)* )) => { + match $submac!($i, $($args)*) { + Ok((_, _)) => Err(LexError), + Err(LexError) => Ok(($i, ())), + } + }; +} + +macro_rules! tag { + ($i:expr, $tag:expr) => { + if $i.starts_with($tag) { + Ok(($i.advance($tag.len()), &$i.rest[..$tag.len()])) + } else { + Err(LexError) + } + }; +} + +macro_rules! punct { + ($i:expr, $punct:expr) => { + $crate::strnom::punct($i, $punct) + }; +} + +/// Do not use directly. Use `punct!`. +pub fn punct<'a>(input: Cursor<'a>, token: &'static str) -> PResult<'a, &'a str> { + let input = skip_whitespace(input); + if input.starts_with(token) { + Ok((input.advance(token.len()), token)) + } else { + Err(LexError) + } +} + +macro_rules! preceded { + ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => { + match tuple!($i, $submac!($($args)*), $submac2!($($args2)*)) { + Ok((remaining, (_, o))) => Ok((remaining, o)), + Err(LexError) => Err(LexError), + } + }; + + ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => { + preceded!($i, $submac!($($args)*), call!($g)) + }; +} + +macro_rules! delimited { + ($i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)+) => { + match tuple_parser!($i, (), $submac!($($args)*), $($rest)*) { + Err(LexError) => Err(LexError), + Ok((i1, (_, o, _))) => Ok((i1, o)) + } + }; +} + +macro_rules! map { + ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => { + match $submac!($i, $($args)*) { + Err(LexError) => Err(LexError), + Ok((i, o)) => Ok((i, call!(o, $g))) + } + }; + + ($i:expr, $f:expr, $g:expr) => { + map!($i, call!($f), $g) + }; +} --- /dev/null +++ b/vendor/proc-macro2-0.4.19/src/unstable.rs @@ -0,0 +1,836 @@ +#![cfg_attr(not(super_unstable), allow(dead_code))] + +use std::fmt; +use std::iter; +use std::panic::{self, PanicInfo}; +use std::str::FromStr; + +use proc_macro; +use stable; + +use {Delimiter, Group, Punct, Spacing, TokenTree}; + +#[derive(Clone)] +pub enum TokenStream { + Nightly(proc_macro::TokenStream), + Stable(stable::TokenStream), +} + +pub enum LexError { + Nightly(proc_macro::LexError), + Stable(stable::LexError), +} + +fn nightly_works() -> bool { + use std::sync::atomic::*; + use std::sync::Once; + + static WORKS: AtomicUsize = ATOMIC_USIZE_INIT; + static INIT: Once = Once::new(); + + match WORKS.load(Ordering::SeqCst) { + 1 => return false, + 2 => return true, + _ => {} + } + + // Swap in a null panic hook to avoid printing "thread panicked" to stderr, + // then use catch_unwind to determine whether the compiler's proc_macro is + // working. When proc-macro2 is used from outside of a procedural macro all + // of the proc_macro crate's APIs currently panic. + // + // The Once is to prevent the possibility of this ordering: + // + // thread 1 calls take_hook, gets the user's original hook + // thread 1 calls set_hook with the null hook + // thread 2 calls take_hook, thinks null hook is the original hook + // thread 2 calls set_hook with the null hook + // thread 1 calls set_hook with the actual original hook + // thread 2 calls set_hook with what it thinks is the original hook + // + // in which the user's hook has been lost. + // + // There is still a race condition where a panic in a different thread can + // happen during the interval that the user's original panic hook is + // unregistered such that their hook is incorrectly not called. This is + // sufficiently unlikely and less bad than printing panic messages to stderr + // on correct use of this crate. Maybe there is a libstd feature request + // here. 
For now, if a user needs to guarantee that this failure mode does
+    // not occur, they need to call e.g. `proc_macro2::Span::call_site()` from
+    // the main thread before launching any other threads.
+    INIT.call_once(|| {
+        type PanicHook = Fn(&PanicInfo) + Sync + Send + 'static;
+
+        let null_hook: Box<PanicHook> = Box::new(|_panic_info| { /* ignore */ });
+        let sanity_check = &*null_hook as *const PanicHook;
+        let original_hook = panic::take_hook();
+        panic::set_hook(null_hook);
+
+        let works = panic::catch_unwind(|| proc_macro::Span::call_site()).is_ok();
+        WORKS.store(works as usize + 1, Ordering::SeqCst);
+
+        let hopefully_null_hook = panic::take_hook();
+        panic::set_hook(original_hook);
+        if sanity_check != &*hopefully_null_hook {
+            panic!("observed race condition in proc_macro2::nightly_works");
+        }
+    });
+    nightly_works()
+}
+
+fn mismatch() -> ! {
+    panic!("stable/nightly mismatch")
+}
+
+impl TokenStream {
+    pub fn new() -> TokenStream {
+        if nightly_works() {
+            TokenStream::Nightly(proc_macro::TokenStream::new())
+        } else {
+            TokenStream::Stable(stable::TokenStream::new())
+        }
+    }
+
+    pub fn is_empty(&self) -> bool {
+        match self {
+            TokenStream::Nightly(tts) => tts.is_empty(),
+            TokenStream::Stable(tts) => tts.is_empty(),
+        }
+    }
+
+    fn unwrap_nightly(self) -> proc_macro::TokenStream {
+        match self {
+            TokenStream::Nightly(s) => s,
+            TokenStream::Stable(_) => mismatch(),
+        }
+    }
+
+    fn unwrap_stable(self) -> stable::TokenStream {
+        match self {
+            TokenStream::Nightly(_) => mismatch(),
+            TokenStream::Stable(s) => s,
+        }
+    }
+}
+
+impl FromStr for TokenStream {
+    type Err = LexError;
+
+    fn from_str(src: &str) -> Result<TokenStream, LexError> {
+        if nightly_works() {
+            Ok(TokenStream::Nightly(src.parse()?))
+        } else {
+            Ok(TokenStream::Stable(src.parse()?))
+        }
+    }
+}
+
+impl fmt::Display for TokenStream {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            TokenStream::Nightly(tts) => tts.fmt(f),
+            TokenStream::Stable(tts) => tts.fmt(f),
+        }
+    }
+}
+
+impl From<proc_macro::TokenStream> for TokenStream {
+    fn from(inner: proc_macro::TokenStream) -> TokenStream {
+        TokenStream::Nightly(inner)
+    }
+}
+
+impl From<TokenStream> for proc_macro::TokenStream {
+    fn from(inner: TokenStream) -> proc_macro::TokenStream {
+        match inner {
+            TokenStream::Nightly(inner) => inner,
+            TokenStream::Stable(inner) => inner.to_string().parse().unwrap(),
+        }
+    }
+}
+
+impl From<stable::TokenStream> for TokenStream {
+    fn from(inner: stable::TokenStream) -> TokenStream {
+        TokenStream::Stable(inner)
+    }
+}
+
+impl From<TokenTree> for TokenStream {
+    fn from(token: TokenTree) -> TokenStream {
+        if !nightly_works() {
+            return TokenStream::Stable(token.into());
+        }
+        let tt: proc_macro::TokenTree = match token {
+            TokenTree::Group(tt) => {
+                let delim = match tt.delimiter() {
+                    Delimiter::Parenthesis => proc_macro::Delimiter::Parenthesis,
+                    Delimiter::Bracket => proc_macro::Delimiter::Bracket,
+                    Delimiter::Brace => proc_macro::Delimiter::Brace,
+                    Delimiter::None => proc_macro::Delimiter::None,
+                };
+                let span = tt.span();
+                let mut group = proc_macro::Group::new(delim, tt.stream.inner.unwrap_nightly());
+                group.set_span(span.inner.unwrap_nightly());
+                group.into()
+            }
+            TokenTree::Punct(tt) => {
+                let spacing = match tt.spacing() {
+                    Spacing::Joint => proc_macro::Spacing::Joint,
+                    Spacing::Alone => proc_macro::Spacing::Alone,
+                };
+                let mut op = proc_macro::Punct::new(tt.as_char(), spacing);
+                op.set_span(tt.span().inner.unwrap_nightly());
+                op.into()
+            }
+            TokenTree::Ident(tt) => tt.inner.unwrap_nightly().into(),
+            TokenTree::Literal(tt) => tt.inner.unwrap_nightly().into(),
+        };
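+        // `tt` is now a compiler-side `proc_macro::TokenTree`; `.into()`
+        // wraps it in a single-token `proc_macro::TokenStream` for the
+        // nightly variant of our stream.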
TokenStream::Nightly(tt.into()) + } +} + +impl iter::FromIterator for TokenStream { + fn from_iter>(trees: I) -> Self { + if nightly_works() { + let trees = trees + .into_iter() + .map(TokenStream::from) + .flat_map(|t| match t { + TokenStream::Nightly(s) => s, + TokenStream::Stable(_) => mismatch(), + }); + TokenStream::Nightly(trees.collect()) + } else { + TokenStream::Stable(trees.into_iter().collect()) + } + } +} + +impl iter::FromIterator for TokenStream { + fn from_iter>(streams: I) -> Self { + let mut streams = streams.into_iter(); + match streams.next() { + #[cfg(slow_extend)] + Some(TokenStream::Nightly(first)) => { + let stream = iter::once(first).chain(streams.map(|s| { + match s { + TokenStream::Nightly(s) => s, + TokenStream::Stable(_) => mismatch(), + } + })).collect(); + TokenStream::Nightly(stream) + } + #[cfg(not(slow_extend))] + Some(TokenStream::Nightly(mut first)) => { + first.extend(streams.map(|s| { + match s { + TokenStream::Nightly(s) => s, + TokenStream::Stable(_) => mismatch(), + } + })); + TokenStream::Nightly(first) + } + Some(TokenStream::Stable(mut first)) => { + first.extend(streams.map(|s| { + match s { + TokenStream::Stable(s) => s, + TokenStream::Nightly(_) => mismatch(), + } + })); + TokenStream::Stable(first) + } + None => TokenStream::new(), + + } + } +} + +impl Extend for TokenStream { + fn extend>(&mut self, streams: I) { + match self { + TokenStream::Nightly(tts) => { + #[cfg(not(slow_extend))] + { + tts.extend( + streams + .into_iter() + .map(|t| TokenStream::from(t).unwrap_nightly()), + ); + } + #[cfg(slow_extend)] + { + *tts = tts + .clone() + .into_iter() + .chain( + streams + .into_iter() + .map(TokenStream::from) + .flat_map(|t| match t { + TokenStream::Nightly(tts) => tts.into_iter(), + _ => mismatch(), + }), + ).collect(); + } + } + TokenStream::Stable(tts) => tts.extend(streams), + } + } +} + +impl Extend for TokenStream { + fn extend>(&mut self, streams: I) { + match self { + TokenStream::Nightly(tts) => { + #[cfg(not(slow_extend))] + { + tts.extend(streams.into_iter().map(|stream| stream.unwrap_nightly())); + } + #[cfg(slow_extend)] + { + *tts = tts + .clone() + .into_iter() + .chain( + streams + .into_iter() + .flat_map(|t| match t { + TokenStream::Nightly(tts) => tts.into_iter(), + _ => mismatch(), + }), + ).collect(); + } + } + TokenStream::Stable(tts) => { + tts.extend(streams.into_iter().map(|stream| stream.unwrap_stable())) + } + } + } +} + +impl fmt::Debug for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + TokenStream::Nightly(tts) => tts.fmt(f), + TokenStream::Stable(tts) => tts.fmt(f), + } + } +} + +impl From for LexError { + fn from(e: proc_macro::LexError) -> LexError { + LexError::Nightly(e) + } +} + +impl From for LexError { + fn from(e: stable::LexError) -> LexError { + LexError::Stable(e) + } +} + +impl fmt::Debug for LexError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + LexError::Nightly(e) => e.fmt(f), + LexError::Stable(e) => e.fmt(f), + } + } +} + +pub enum TokenTreeIter { + Nightly(proc_macro::token_stream::IntoIter), + Stable(stable::TokenTreeIter), +} + +impl IntoIterator for TokenStream { + type Item = TokenTree; + type IntoIter = TokenTreeIter; + + fn into_iter(self) -> TokenTreeIter { + match self { + TokenStream::Nightly(tts) => TokenTreeIter::Nightly(tts.into_iter()), + TokenStream::Stable(tts) => TokenTreeIter::Stable(tts.into_iter()), + } + } +} + +impl Iterator for TokenTreeIter { + type Item = TokenTree; + + fn next(&mut self) -> Option { 
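+        // The nightly iterator yields compiler-side `proc_macro` token trees
+        // that must be converted below; the stable iterator already produces
+        // proc-macro2 token trees and can be returned directly.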
+        let token = match self {
+            TokenTreeIter::Nightly(iter) => iter.next()?,
+            TokenTreeIter::Stable(iter) => return iter.next(),
+        };
+        Some(match token {
+            proc_macro::TokenTree::Group(tt) => {
+                let delim = match tt.delimiter() {
+                    proc_macro::Delimiter::Parenthesis => Delimiter::Parenthesis,
+                    proc_macro::Delimiter::Bracket => Delimiter::Bracket,
+                    proc_macro::Delimiter::Brace => Delimiter::Brace,
+                    proc_macro::Delimiter::None => Delimiter::None,
+                };
+                let stream = ::TokenStream::_new(TokenStream::Nightly(tt.stream()));
+                let mut g = Group::new(delim, stream);
+                g.set_span(::Span::_new(Span::Nightly(tt.span())));
+                g.into()
+            }
+            proc_macro::TokenTree::Punct(tt) => {
+                let spacing = match tt.spacing() {
+                    proc_macro::Spacing::Joint => Spacing::Joint,
+                    proc_macro::Spacing::Alone => Spacing::Alone,
+                };
+                let mut o = Punct::new(tt.as_char(), spacing);
+                o.set_span(::Span::_new(Span::Nightly(tt.span())));
+                o.into()
+            }
+            proc_macro::TokenTree::Ident(s) => ::Ident::_new(Ident::Nightly(s)).into(),
+            proc_macro::TokenTree::Literal(l) => ::Literal::_new(Literal::Nightly(l)).into(),
+        })
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        match self {
+            TokenTreeIter::Nightly(tts) => tts.size_hint(),
+            TokenTreeIter::Stable(tts) => tts.size_hint(),
+        }
+    }
+}
+
+impl fmt::Debug for TokenTreeIter {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("TokenTreeIter").finish()
+    }
+}
+
+pub use stable::FileName;
+
+// NOTE: We have to generate our own filename object here because we can't wrap
+// the one provided by proc_macro.
+#[derive(Clone, PartialEq, Eq)]
+#[cfg(super_unstable)]
+pub enum SourceFile {
+    Nightly(proc_macro::SourceFile, FileName),
+    Stable(stable::SourceFile),
+}
+
+#[cfg(super_unstable)]
+impl SourceFile {
+    fn nightly(sf: proc_macro::SourceFile) -> Self {
+        let filename = stable::file_name(sf.path().display().to_string());
+        SourceFile::Nightly(sf, filename)
+    }
+
+    /// Get the path to this source file as a string.
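+    /// For the nightly variant this returns the `FileName` captured when the
+    /// `SourceFile` was constructed; the stable variant delegates to the
+    /// stable implementation's own path.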
+ pub fn path(&self) -> &FileName { + match self { + SourceFile::Nightly(_, f) => f, + SourceFile::Stable(a) => a.path(), + } + } + + pub fn is_real(&self) -> bool { + match self { + SourceFile::Nightly(a, _) => a.is_real(), + SourceFile::Stable(a) => a.is_real(), + } + } +} + +#[cfg(super_unstable)] +impl AsRef for SourceFile { + fn as_ref(&self) -> &FileName { + self.path() + } +} + +#[cfg(super_unstable)] +impl fmt::Debug for SourceFile { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + SourceFile::Nightly(a, _) => a.fmt(f), + SourceFile::Stable(a) => a.fmt(f), + } + } +} + +pub struct LineColumn { + pub line: usize, + pub column: usize, +} + +#[derive(Copy, Clone)] +pub enum Span { + Nightly(proc_macro::Span), + Stable(stable::Span), +} + +impl Span { + pub fn call_site() -> Span { + if nightly_works() { + Span::Nightly(proc_macro::Span::call_site()) + } else { + Span::Stable(stable::Span::call_site()) + } + } + + #[cfg(super_unstable)] + pub fn def_site() -> Span { + if nightly_works() { + Span::Nightly(proc_macro::Span::def_site()) + } else { + Span::Stable(stable::Span::def_site()) + } + } + + #[cfg(super_unstable)] + pub fn resolved_at(&self, other: Span) -> Span { + match (self, other) { + (Span::Nightly(a), Span::Nightly(b)) => Span::Nightly(a.resolved_at(b)), + (Span::Stable(a), Span::Stable(b)) => Span::Stable(a.resolved_at(b)), + _ => mismatch(), + } + } + + #[cfg(super_unstable)] + pub fn located_at(&self, other: Span) -> Span { + match (self, other) { + (Span::Nightly(a), Span::Nightly(b)) => Span::Nightly(a.located_at(b)), + (Span::Stable(a), Span::Stable(b)) => Span::Stable(a.located_at(b)), + _ => mismatch(), + } + } + + pub fn unstable(self) -> proc_macro::Span { + match self { + Span::Nightly(s) => s, + Span::Stable(_) => mismatch(), + } + } + + #[cfg(super_unstable)] + pub fn source_file(&self) -> SourceFile { + match self { + Span::Nightly(s) => SourceFile::nightly(s.source_file()), + Span::Stable(s) => SourceFile::Stable(s.source_file()), + } + } + + #[cfg(super_unstable)] + pub fn start(&self) -> LineColumn { + match self { + Span::Nightly(s) => { + let proc_macro::LineColumn { line, column } = s.start(); + LineColumn { line, column } + } + Span::Stable(s) => { + let stable::LineColumn { line, column } = s.start(); + LineColumn { line, column } + } + } + } + + #[cfg(super_unstable)] + pub fn end(&self) -> LineColumn { + match self { + Span::Nightly(s) => { + let proc_macro::LineColumn { line, column } = s.end(); + LineColumn { line, column } + } + Span::Stable(s) => { + let stable::LineColumn { line, column } = s.end(); + LineColumn { line, column } + } + } + } + + #[cfg(super_unstable)] + pub fn join(&self, other: Span) -> Option { + let ret = match (self, other) { + (Span::Nightly(a), Span::Nightly(b)) => Span::Nightly(a.join(b)?), + (Span::Stable(a), Span::Stable(b)) => Span::Stable(a.join(b)?), + _ => return None, + }; + Some(ret) + } + + #[cfg(super_unstable)] + pub fn eq(&self, other: &Span) -> bool { + match (self, other) { + (Span::Nightly(a), Span::Nightly(b)) => a.eq(b), + (Span::Stable(a), Span::Stable(b)) => a.eq(b), + _ => false, + } + } + + fn unwrap_nightly(self) -> proc_macro::Span { + match self { + Span::Nightly(s) => s, + Span::Stable(_) => mismatch(), + } + } +} + +impl From for ::Span { + fn from(proc_span: proc_macro::Span) -> ::Span { + ::Span::_new(Span::Nightly(proc_span)) + } +} + +impl From for Span { + fn from(inner: stable::Span) -> Span { + Span::Stable(inner) + } +} + +impl fmt::Debug for Span { + fn fmt(&self, 
f: &mut fmt::Formatter) -> fmt::Result { + match self { + Span::Nightly(s) => s.fmt(f), + Span::Stable(s) => s.fmt(f), + } + } +} + +#[derive(Clone)] +pub enum Ident { + Nightly(proc_macro::Ident), + Stable(stable::Ident), +} + +impl Ident { + pub fn new(string: &str, span: Span) -> Ident { + match span { + Span::Nightly(s) => Ident::Nightly(proc_macro::Ident::new(string, s)), + Span::Stable(s) => Ident::Stable(stable::Ident::new(string, s)), + } + } + + pub fn new_raw(string: &str, span: Span) -> Ident { + match span { + Span::Nightly(s) => { + let p: proc_macro::TokenStream = string.parse().unwrap(); + let ident = match p.into_iter().next() { + Some(proc_macro::TokenTree::Ident(mut i)) => { + i.set_span(s); + i + } + _ => panic!(), + }; + Ident::Nightly(ident) + } + Span::Stable(s) => Ident::Stable(stable::Ident::new_raw(string, s)), + } + } + + pub fn span(&self) -> Span { + match self { + Ident::Nightly(t) => Span::Nightly(t.span()), + Ident::Stable(t) => Span::Stable(t.span()), + } + } + + pub fn set_span(&mut self, span: Span) { + match (self, span) { + (Ident::Nightly(t), Span::Nightly(s)) => t.set_span(s), + (Ident::Stable(t), Span::Stable(s)) => t.set_span(s), + _ => mismatch(), + } + } + + fn unwrap_nightly(self) -> proc_macro::Ident { + match self { + Ident::Nightly(s) => s, + Ident::Stable(_) => mismatch(), + } + } +} + +impl PartialEq for Ident { + fn eq(&self, other: &Ident) -> bool { + match (self, other) { + (Ident::Nightly(t), Ident::Nightly(o)) => t.to_string() == o.to_string(), + (Ident::Stable(t), Ident::Stable(o)) => t == o, + _ => mismatch(), + } + } +} + +impl PartialEq for Ident +where + T: ?Sized + AsRef, +{ + fn eq(&self, other: &T) -> bool { + let other = other.as_ref(); + match self { + Ident::Nightly(t) => t.to_string() == other, + Ident::Stable(t) => t == other, + } + } +} + +impl fmt::Display for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Ident::Nightly(t) => t.fmt(f), + Ident::Stable(t) => t.fmt(f), + } + } +} + +impl fmt::Debug for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Ident::Nightly(t) => t.fmt(f), + Ident::Stable(t) => t.fmt(f), + } + } +} + +#[derive(Clone)] +pub enum Literal { + Nightly(proc_macro::Literal), + Stable(stable::Literal), +} + +macro_rules! suffixed_numbers { + ($($name:ident => $kind:ident,)*) => ($( + pub fn $name(n: $kind) -> Literal { + if nightly_works() { + Literal::Nightly(proc_macro::Literal::$name(n)) + } else { + Literal::Stable(stable::Literal::$name(n)) + } + } + )*) +} + +macro_rules! unsuffixed_integers { + ($($name:ident => $kind:ident,)*) => ($( + pub fn $name(n: $kind) -> Literal { + if nightly_works() { + Literal::Nightly(proc_macro::Literal::$name(n)) + } else { + Literal::Stable(stable::Literal::$name(n)) + } + } + )*) +} + +impl Literal { + suffixed_numbers! { + u8_suffixed => u8, + u16_suffixed => u16, + u32_suffixed => u32, + u64_suffixed => u64, + usize_suffixed => usize, + i8_suffixed => i8, + i16_suffixed => i16, + i32_suffixed => i32, + i64_suffixed => i64, + isize_suffixed => isize, + + f32_suffixed => f32, + f64_suffixed => f64, + } + + unsuffixed_integers! 
{ + u8_unsuffixed => u8, + u16_unsuffixed => u16, + u32_unsuffixed => u32, + u64_unsuffixed => u64, + usize_unsuffixed => usize, + i8_unsuffixed => i8, + i16_unsuffixed => i16, + i32_unsuffixed => i32, + i64_unsuffixed => i64, + isize_unsuffixed => isize, + } + + pub fn f32_unsuffixed(f: f32) -> Literal { + if nightly_works() { + Literal::Nightly(proc_macro::Literal::f32_unsuffixed(f)) + } else { + Literal::Stable(stable::Literal::f32_unsuffixed(f)) + } + } + + pub fn f64_unsuffixed(f: f64) -> Literal { + if nightly_works() { + Literal::Nightly(proc_macro::Literal::f64_unsuffixed(f)) + } else { + Literal::Stable(stable::Literal::f64_unsuffixed(f)) + } + } + + pub fn string(t: &str) -> Literal { + if nightly_works() { + Literal::Nightly(proc_macro::Literal::string(t)) + } else { + Literal::Stable(stable::Literal::string(t)) + } + } + + pub fn character(t: char) -> Literal { + if nightly_works() { + Literal::Nightly(proc_macro::Literal::character(t)) + } else { + Literal::Stable(stable::Literal::character(t)) + } + } + + pub fn byte_string(bytes: &[u8]) -> Literal { + if nightly_works() { + Literal::Nightly(proc_macro::Literal::byte_string(bytes)) + } else { + Literal::Stable(stable::Literal::byte_string(bytes)) + } + } + + pub fn span(&self) -> Span { + match self { + Literal::Nightly(lit) => Span::Nightly(lit.span()), + Literal::Stable(lit) => Span::Stable(lit.span()), + } + } + + pub fn set_span(&mut self, span: Span) { + match (self, span) { + (Literal::Nightly(lit), Span::Nightly(s)) => lit.set_span(s), + (Literal::Stable(lit), Span::Stable(s)) => lit.set_span(s), + _ => mismatch(), + } + } + + fn unwrap_nightly(self) -> proc_macro::Literal { + match self { + Literal::Nightly(s) => s, + Literal::Stable(_) => mismatch(), + } + } +} + +impl From for Literal { + fn from(s: stable::Literal) -> Literal { + Literal::Stable(s) + } +} + +impl fmt::Display for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Literal::Nightly(t) => t.fmt(f), + Literal::Stable(t) => t.fmt(f), + } + } +} + +impl fmt::Debug for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Literal::Nightly(t) => t.fmt(f), + Literal::Stable(t) => t.fmt(f), + } + } +} --- /dev/null +++ b/vendor/proc-macro2-0.4.19/tests/test.rs @@ -0,0 +1,390 @@ +extern crate proc_macro2; + +use std::str::{self, FromStr}; + +use proc_macro2::{Ident, Literal, Spacing, Span, TokenStream, TokenTree}; + +#[test] +fn terms() { + assert_eq!( + Ident::new("String", Span::call_site()).to_string(), + "String" + ); + assert_eq!(Ident::new("fn", Span::call_site()).to_string(), "fn"); + assert_eq!(Ident::new("_", Span::call_site()).to_string(), "_"); +} + +#[test] +#[cfg(procmacro2_semver_exempt)] +fn raw_terms() { + assert_eq!( + Ident::new_raw("String", Span::call_site()).to_string(), + "r#String" + ); + assert_eq!(Ident::new_raw("fn", Span::call_site()).to_string(), "r#fn"); + assert_eq!(Ident::new_raw("_", Span::call_site()).to_string(), "r#_"); +} + +#[test] +#[should_panic(expected = "Ident is not allowed to be empty; use Option")] +fn term_empty() { + Ident::new("", Span::call_site()); +} + +#[test] +#[should_panic(expected = "Ident cannot be a number; use Literal instead")] +fn term_number() { + Ident::new("255", Span::call_site()); +} + +#[test] +#[should_panic(expected = "\"a#\" is not a valid Ident")] +fn term_invalid() { + Ident::new("a#", Span::call_site()); +} + +#[test] +#[should_panic(expected = "not a valid Ident")] +fn raw_term_empty() { + Ident::new("r#", 
Span::call_site()); +} + +#[test] +#[should_panic(expected = "not a valid Ident")] +fn raw_term_number() { + Ident::new("r#255", Span::call_site()); +} + +#[test] +#[should_panic(expected = "\"r#a#\" is not a valid Ident")] +fn raw_term_invalid() { + Ident::new("r#a#", Span::call_site()); +} + +#[test] +#[should_panic(expected = "not a valid Ident")] +fn lifetime_empty() { + Ident::new("'", Span::call_site()); +} + +#[test] +#[should_panic(expected = "not a valid Ident")] +fn lifetime_number() { + Ident::new("'255", Span::call_site()); +} + +#[test] +#[should_panic(expected = r#""\'a#" is not a valid Ident"#)] +fn lifetime_invalid() { + Ident::new("'a#", Span::call_site()); +} + +#[test] +fn literals() { + assert_eq!(Literal::string("foo").to_string(), "\"foo\""); + assert_eq!(Literal::string("\"").to_string(), "\"\\\"\""); + assert_eq!(Literal::f32_unsuffixed(10.0).to_string(), "10.0"); +} + +#[test] +fn roundtrip() { + fn roundtrip(p: &str) { + println!("parse: {}", p); + let s = p.parse::().unwrap().to_string(); + println!("first: {}", s); + let s2 = s.to_string().parse::().unwrap().to_string(); + assert_eq!(s, s2); + } + roundtrip("a"); + roundtrip("<<"); + roundtrip("<<="); + roundtrip( + " + 1 + 1.0 + 1f32 + 2f64 + 1usize + 4isize + 4e10 + 1_000 + 1_0i32 + 8u8 + 9 + 0 + 0xffffffffffffffffffffffffffffffff + ", + ); + roundtrip("'a"); + roundtrip("'_"); + roundtrip("'static"); + roundtrip("'\\u{10__FFFF}'"); + roundtrip("\"\\u{10_F0FF__}foo\\u{1_0_0_0__}\""); +} + +#[test] +fn fail() { + fn fail(p: &str) { + if let Ok(s) = p.parse::() { + panic!("should have failed to parse: {}\n{:#?}", p, s); + } + } + fail("1x"); + fail("1u80"); + fail("1f320"); + fail("' static"); + fail("r#1"); + fail("r#_"); +} + +#[cfg(procmacro2_semver_exempt)] +#[test] +fn span_test() { + use proc_macro2::TokenTree; + + fn check_spans(p: &str, mut lines: &[(usize, usize, usize, usize)]) { + let ts = p.parse::().unwrap(); + check_spans_internal(ts, &mut lines); + } + + fn check_spans_internal(ts: TokenStream, lines: &mut &[(usize, usize, usize, usize)]) { + for i in ts { + if let Some((&(sline, scol, eline, ecol), rest)) = lines.split_first() { + *lines = rest; + + let start = i.span().start(); + assert_eq!(start.line, sline, "sline did not match for {}", i); + assert_eq!(start.column, scol, "scol did not match for {}", i); + + let end = i.span().end(); + assert_eq!(end.line, eline, "eline did not match for {}", i); + assert_eq!(end.column, ecol, "ecol did not match for {}", i); + + match i { + TokenTree::Group(ref g) => { + check_spans_internal(g.stream().clone(), lines); + } + _ => {} + } + } + } + } + + check_spans( + "\ +/// This is a document comment +testing 123 +{ + testing 234 +}", + &[ + (1, 0, 1, 30), // # + (1, 0, 1, 30), // [ ... ] + (1, 0, 1, 30), // doc + (1, 0, 1, 30), // = + (1, 0, 1, 30), // "This is..." + (2, 0, 2, 7), // testing + (2, 8, 2, 11), // 123 + (3, 0, 5, 1), // { ... 
} + (4, 2, 4, 9), // testing + (4, 10, 4, 13), // 234 + ], + ); +} + +#[cfg(procmacro2_semver_exempt)] +#[cfg(not(feature = "nightly"))] +#[test] +fn default_span() { + let start = Span::call_site().start(); + assert_eq!(start.line, 1); + assert_eq!(start.column, 0); + let end = Span::call_site().end(); + assert_eq!(end.line, 1); + assert_eq!(end.column, 0); + let source_file = Span::call_site().source_file(); + assert_eq!(source_file.path().to_string(), ""); + assert!(!source_file.is_real()); +} + +#[cfg(procmacro2_semver_exempt)] +#[test] +fn span_join() { + let source1 = "aaa\nbbb" + .parse::() + .unwrap() + .into_iter() + .collect::>(); + let source2 = "ccc\nddd" + .parse::() + .unwrap() + .into_iter() + .collect::>(); + + assert!(source1[0].span().source_file() != source2[0].span().source_file()); + assert_eq!( + source1[0].span().source_file(), + source1[1].span().source_file() + ); + + let joined1 = source1[0].span().join(source1[1].span()); + let joined2 = source1[0].span().join(source2[0].span()); + assert!(joined1.is_some()); + assert!(joined2.is_none()); + + let start = joined1.unwrap().start(); + let end = joined1.unwrap().end(); + assert_eq!(start.line, 1); + assert_eq!(start.column, 0); + assert_eq!(end.line, 2); + assert_eq!(end.column, 3); + + assert_eq!( + joined1.unwrap().source_file(), + source1[0].span().source_file() + ); +} + +#[test] +fn no_panic() { + let s = str::from_utf8(b"b\'\xc2\x86 \x00\x00\x00^\"").unwrap(); + assert!(s.parse::().is_err()); +} + +#[test] +fn tricky_doc_comment() { + let stream = "/**/".parse::().unwrap(); + let tokens = stream.into_iter().collect::>(); + assert!(tokens.is_empty(), "not empty -- {:?}", tokens); + + let stream = "/// doc".parse::().unwrap(); + let tokens = stream.into_iter().collect::>(); + assert!(tokens.len() == 2, "not length 2 -- {:?}", tokens); + match tokens[0] { + proc_macro2::TokenTree::Punct(ref tt) => assert_eq!(tt.as_char(), '#'), + _ => panic!("wrong token {:?}", tokens[0]), + } + let mut tokens = match tokens[1] { + proc_macro2::TokenTree::Group(ref tt) => { + assert_eq!(tt.delimiter(), proc_macro2::Delimiter::Bracket); + tt.stream().into_iter() + } + _ => panic!("wrong token {:?}", tokens[0]), + }; + + match tokens.next().unwrap() { + proc_macro2::TokenTree::Ident(ref tt) => assert_eq!(tt.to_string(), "doc"), + t => panic!("wrong token {:?}", t), + } + match tokens.next().unwrap() { + proc_macro2::TokenTree::Punct(ref tt) => assert_eq!(tt.as_char(), '='), + t => panic!("wrong token {:?}", t), + } + match tokens.next().unwrap() { + proc_macro2::TokenTree::Literal(ref tt) => { + assert_eq!(tt.to_string(), "\" doc\""); + } + t => panic!("wrong token {:?}", t), + } + assert!(tokens.next().is_none()); + + let stream = "//! 
doc".parse::().unwrap(); + let tokens = stream.into_iter().collect::>(); + assert!(tokens.len() == 3, "not length 3 -- {:?}", tokens); +} + +#[test] +fn op_before_comment() { + let mut tts = TokenStream::from_str("~// comment").unwrap().into_iter(); + match tts.next().unwrap() { + TokenTree::Punct(tt) => { + assert_eq!(tt.as_char(), '~'); + assert_eq!(tt.spacing(), Spacing::Alone); + } + wrong => panic!("wrong token {:?}", wrong), + } +} + +#[test] +fn raw_identifier() { + let mut tts = TokenStream::from_str("r#dyn").unwrap().into_iter(); + match tts.next().unwrap() { + TokenTree::Ident(raw) => assert_eq!("r#dyn", raw.to_string()), + wrong => panic!("wrong token {:?}", wrong), + } + assert!(tts.next().is_none()); +} + +#[test] +fn test_debug_ident() { + let ident = Ident::new("proc_macro", Span::call_site()); + + #[cfg(not(procmacro2_semver_exempt))] + let expected = "Ident(proc_macro)"; + + #[cfg(procmacro2_semver_exempt)] + let expected = "Ident { sym: proc_macro, span: bytes(0..0) }"; + + assert_eq!(expected, format!("{:?}", ident)); +} + +#[test] +#[cfg(not(feature = "nightly"))] +fn test_debug_tokenstream() { + let tts = TokenStream::from_str("[a + 1]").unwrap(); + + #[cfg(not(procmacro2_semver_exempt))] + let expected = "\ +TokenStream [ + Group { + delimiter: Bracket, + stream: TokenStream [ + Ident { + sym: a + }, + Punct { + op: '+', + spacing: Alone + }, + Literal { + lit: 1 + } + ] + } +]\ + "; + + #[cfg(procmacro2_semver_exempt)] + let expected = "\ +TokenStream [ + Group { + delimiter: Bracket, + stream: TokenStream [ + Ident { + sym: a, + span: bytes(2..3) + }, + Punct { + op: '+', + spacing: Alone, + span: bytes(4..5) + }, + Literal { + lit: 1, + span: bytes(6..7) + } + ], + span: bytes(1..8) + } +]\ + "; + + assert_eq!(expected, format!("{:#?}", tts)); +} + +#[test] +fn default_tokenstream_is_empty() { + let default_token_stream: TokenStream = Default::default(); + + assert!(default_token_stream.is_empty()); +} --- a/vendor/proc-macro2/.cargo-checksum.json +++ b/vendor/proc-macro2/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"8c6bfec48eb14e8224ae01653817c4cfe63d4a69b7c28bbea3031c69f6a55e6b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"538fd635d385b6a90ef4cc1e361aad717162a139e932a6192212cad8407aa8e1","build.rs":"d3abc22ceb358ffc026f2c388b0a110a05637d8b5dda642e2c81323be9cb79f9","src/lib.rs":"fbe590b22b2fbf6e73bb750ec43cc59b4dd03e85e928065c723f849b29d59846","src/stable.rs":"4d6c27538b219e955df44f992c89a3b11f460fd549f21d5b78e818b957c3f341","src/strnom.rs":"807c377bdb49b8b1c67d013089b8ff33fe93ffd3fa36b6440dbb1d6fe8cd9c17","src/unstable.rs":"62157614541a82a026c3af8609c3f81e63d805666a41fd3a2a023268a4049847","tests/test.rs":"428f4298e16a23db8f8fbb6101a30e993f08dc0befa2d95439dcefb364d7a7cf"},"package":"ffe022fb8c8bd254524b0b3305906c1921fa37a84a644e29079a9e62200c3901"} \ No newline at end of file 
+{"files":{"Cargo.toml":"719a9a085466f3245b47979a9a1557ba850503bf90aa99b912ad5dd8c7570a95","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"362a2156f7645528061b6e8487a2eb0f32f1693012ed82ee57afa05c039bba0d","build.rs":"0cc6e2cb919ddbff59cf1d810283939f97a59f0037540c0f2ee3453237635ff8","src/fallback.rs":"5c6379a90735e27abcc40253b223158c6b1e5784f3850bc423335363e87ef038","src/lib.rs":"cbcf1061804f4944a3ce90d67692c2aa5918541d25ff2dc42e00cd82f646c0af","src/strnom.rs":"37f7791f73f123817ad5403af1d4e2a0714be27401729a2d451bc80b1f26bac9","src/wrapper.rs":"81372e910604217a625aa71c47d43e65f4e008456eae93ac39325c9abf10701a","tests/features.rs":"a86deb8644992a4eb64d9fd493eff16f9cf9c5cb6ade3a634ce0c990cf87d559","tests/marker.rs":"c2652e3ae1dfcb94d2e6313b29712c5dcbd0fe62026913e67bb7cebd7560aade","tests/test.rs":"8c427be9cba1fa8d4a16647e53e3545e5863e29e2c0b311c93c9dd1399abf6a1"},"package":"9c9e470a8dc4aeae2dee2f335e8f533e2d4b347e1434e5671afc49b054592f27"} \ No newline at end of file --- a/vendor/proc-macro2/Cargo.toml +++ b/vendor/proc-macro2/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -11,28 +11,30 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "proc-macro2" -version = "0.4.19" +version = "1.0.6" authors = ["Alex Crichton "] -build = "build.rs" description = "A stable implementation of the upcoming new `proc_macro` API. Comes with an\noption, off by default, to also reimplement itself in terms of the upstream\nunstable API.\n" homepage = "https://github.com/alexcrichton/proc-macro2" documentation = "https://docs.rs/proc-macro2" readme = "README.md" keywords = ["macros"] -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/alexcrichton/proc-macro2" [package.metadata.docs.rs] +rustc-args = ["--cfg", "procmacro2_semver_exempt"] rustdoc-args = ["--cfg", "procmacro2_semver_exempt"] - -[lib] -doctest = false [dependencies.unicode-xid] -version = "0.1" +version = "0.2" +[dev-dependencies.quote] +version = "1.0" +default_features = false [features] default = ["proc-macro"] -nightly = ["proc-macro"] +nightly = [] proc-macro = [] +span-locations = [] [badges.travis-ci] repository = "alexcrichton/proc-macro2" --- a/vendor/proc-macro2/README.md +++ b/vendor/proc-macro2/README.md @@ -1,60 +1,69 @@ # proc-macro2 -[![Build Status](https://api.travis-ci.org/alexcrichton/proc-macro2.svg?branch=master)](https://travis-ci.org/alexcrichton/proc-macro2) +[![Build Status](https://api.travis-ci.com/alexcrichton/proc-macro2.svg?branch=master)](https://travis-ci.com/alexcrichton/proc-macro2) [![Latest Version](https://img.shields.io/crates/v/proc-macro2.svg)](https://crates.io/crates/proc-macro2) [![Rust Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/proc-macro2) -A small shim over the `proc_macro` crate in the compiler intended to multiplex -the stable interface as of 1.15.0 and the interface as of 1.30.0. +A wrapper around the procedural macro API of the compiler's `proc_macro` crate. 
+This library serves two purposes: -New features added in Rust 1.30.0 include: +- **Bring proc-macro-like functionality to other contexts like build.rs and + main.rs.** Types from `proc_macro` are entirely specific to procedural macros + and cannot ever exist in code outside of a procedural macro. Meanwhile + `proc_macro2` types may exist anywhere including non-macro code. By developing + foundational libraries like [syn] and [quote] against `proc_macro2` rather + than `proc_macro`, the procedural macro ecosystem becomes easily applicable to + many other use cases and we avoid reimplementing non-macro equivalents of + those libraries. + +- **Make procedural macros unit testable.** As a consequence of being specific + to procedural macros, nothing that uses `proc_macro` can be executed from a + unit test. In order for helper libraries or components of a macro to be + testable in isolation, they must be implemented using `proc_macro2`. -* Span information on tokens -* No need to go in/out through strings -* Structured input/output - -Libraries ported to `proc_macro2` can retain support for older compilers while -continuing to get all the nice benefits of using a 1.30.0+ compiler. +[syn]: https://github.com/dtolnay/syn +[quote]: https://github.com/dtolnay/quote ## Usage -This crate compiles on all 1.15.0+ stable compilers and usage looks like: - ```toml [dependencies] -proc-macro2 = "0.4" +proc-macro2 = "1.0" ``` -followed by +The skeleton of a typical procedural macro typically looks like this: ```rust extern crate proc_macro; -extern crate proc_macro2; #[proc_macro_derive(MyDerive)] pub fn my_derive(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let input: proc_macro2::TokenStream = input.into(); + let input = proc_macro2::TokenStream::from(input); let output: proc_macro2::TokenStream = { /* transform input */ }; - output.into() + proc_macro::TokenStream::from(output) } ``` -The 1.30.0 compiler is automatically detected and its interfaces are used when -available. +If parsing with [Syn], you'll use [`parse_macro_input!`] instead to propagate +parse errors correctly back to the compiler when parsing fails. -## Unstable Features +[`parse_macro_input!`]: https://docs.rs/syn/1.0/syn/macro.parse_macro_input.html -`proc-macro2` supports exporting some methods from `proc_macro` which are -currently highly unstable, and are not stabilized in the first pass of -`proc_macro` stabilizations. These features are not exported by default. Minor -versions of `proc-macro2` may make breaking changes to them at any time. +## Unstable features -To enable these features, the `procmacro2_semver_exempt` config flag must be -passed to rustc. +The default feature set of proc-macro2 tracks the most recent stable compiler +API. Functionality in `proc_macro` that is not yet stable is not exposed by +proc-macro2 by default. + +To opt into the additional APIs available in the most recent nightly compiler, +the `procmacro2_semver_exempt` config flag must be passed to rustc. We will +polyfill those nightly-only APIs back to Rust 1.31.0. As these are unstable APIs +that track the nightly compiler, minor versions of proc-macro2 may make breaking +changes to them at any time. ``` RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build @@ -64,19 +73,21 @@ Note that this must not only be done for depends on your crate. This infectious nature is intentional, as it serves as a reminder that you are outside of the normal semver guarantees. 
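As a concrete illustration of the unit-testing point above, helpers written
against `proc_macro2` can be exercised directly by `cargo test`. A minimal
sketch (the `first_ident` helper is hypothetical, not part of this crate):

```rust
use proc_macro2::{Ident, TokenStream, TokenTree};

// Hypothetical helper of the kind a macro crate might factor out for testing.
fn first_ident(tokens: TokenStream) -> Option<Ident> {
    tokens.into_iter().find_map(|tt| match tt {
        TokenTree::Ident(ident) => Some(ident),
        _ => None,
    })
}

#[test]
fn finds_first_ident() {
    // Runs as an ordinary unit test; no procedural macro context is needed.
    // Note that keywords like `fn` lex as `Ident` tokens.
    let tokens: TokenStream = "fn demo() {}".parse().unwrap();
    assert_eq!(first_ident(tokens).unwrap().to_string(), "fn");
}
```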
-# License
+Semver exempt methods are marked as such in the proc-macro2 documentation.
 
-This project is licensed under either of
+<br>
 
- * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
-   http://www.apache.org/licenses/LICENSE-2.0)
- * MIT license ([LICENSE-MIT](LICENSE-MIT) or
-   http://opensource.org/licenses/MIT)
+#### License
 
-at your option.
+<sup>
+Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
+2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
+</sup>
 
-### Contribution
+<br>
 
+<sub>
 Unless you explicitly state otherwise, any contribution intentionally submitted
-for inclusion in Serde by you, as defined in the Apache-2.0 license, shall be
-dual licensed as above, without any additional terms or conditions.
+for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
+be dual licensed as above, without any additional terms or conditions.
+</sub>
--- a/vendor/proc-macro2/build.rs
+++ b/vendor/proc-macro2/build.rs
@@ -1,33 +1,79 @@
+// rustc-cfg emitted by the build script:
+//
+// "use_proc_macro"
+//     Link to extern crate proc_macro. Available on any compiler and any target
+//     except wasm32. Requires "proc-macro" Cargo cfg to be enabled (default is
+//     enabled). On wasm32 we never link to proc_macro even if "proc-macro" cfg
+//     is enabled.
+//
+// "wrap_proc_macro"
+//     Wrap types from libproc_macro rather than polyfilling the whole API.
+//     Enabled on rustc 1.29+ as long as procmacro2_semver_exempt is not set,
+//     because we can't emulate the unstable API without emulating everything
+//     else. Also enabled unconditionally on nightly, in which case the
+//     procmacro2_semver_exempt surface area is implemented by using the
+//     nightly-only proc_macro API.
+//
+// "proc_macro_span"
+//     Enable non-dummy behavior of Span::start and Span::end methods which
+//     requires an unstable compiler feature. Enabled when building with
+//     nightly, unless `-Z allow-features` in RUSTFLAGS disallows unstable
+//     features.
+//
+// "super_unstable"
+//     Implement the semver exempt API in terms of the nightly-only proc_macro
+//     API. Enabled when using procmacro2_semver_exempt on a nightly compiler.
+//
+// "span_locations"
+//     Provide methods Span::start and Span::end which give the line/column
+//     location of a token. Enabled by procmacro2_semver_exempt or the
+//     "span-locations" Cargo cfg. This is behind a cfg because tracking
+//     location inside spans is a performance hit.
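+//
+// As background: a `cargo:rustc-cfg=NAME` line printed by this script makes
+// `#[cfg(NAME)]` evaluate to true when rustc compiles this same crate (it has
+// no effect on dependent crates). For example, src/fallback.rs contains:
+//
+//     #[cfg(span_locations)]
+//     use std::cell::RefCell;
+//
+// which is compiled in only when this script emits "span_locations".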
+ use std::env; -use std::process::Command; +use std::process::{self, Command}; use std::str; fn main() { println!("cargo:rerun-if-changed=build.rs"); - let target = env::var("TARGET").unwrap(); + let version = match rustc_version() { + Some(version) => version, + None => return, + }; + + if version.minor < 31 { + eprintln!("Minimum supported rustc version is 1.31"); + process::exit(1); + } + + let semver_exempt = cfg!(procmacro2_semver_exempt); + if semver_exempt { + // https://github.com/alexcrichton/proc-macro2/issues/147 + println!("cargo:rustc-cfg=procmacro2_semver_exempt"); + } + + if semver_exempt || cfg!(feature = "span-locations") { + println!("cargo:rustc-cfg=span_locations"); + } + let target = env::var("TARGET").unwrap(); if !enable_use_proc_macro(&target) { return; } - println!("cargo:rustc-cfg=use_proc_macro"); - let minor = match rustc_minor_version() { - Some(n) => n, - None => return, - }; + println!("cargo:rustc-cfg=use_proc_macro"); - // Rust 1.29 stabilized the necessary APIs in the `proc_macro` crate - if minor >= 29 || cfg!(feature = "nightly") { + if version.nightly || !semver_exempt { println!("cargo:rustc-cfg=wrap_proc_macro"); + } - if cfg!(procmacro2_semver_exempt) { - println!("cargo:rustc-cfg=super_unstable"); - } + if version.nightly && feature_allowed("proc_macro_span") { + println!("cargo:rustc-cfg=proc_macro_span"); } - if minor == 29 { - println!("cargo:rustc-cfg=slow_extend"); + if semver_exempt && version.nightly { + println!("cargo:rustc-cfg=super_unstable"); } } @@ -41,21 +87,43 @@ fn enable_use_proc_macro(target: &str) - cfg!(feature = "proc-macro") } -fn rustc_minor_version() -> Option { - macro_rules! otry { - ($e:expr) => { - match $e { - Some(e) => e, - None => return None, - } - }; - } - let rustc = otry!(env::var_os("RUSTC")); - let output = otry!(Command::new(rustc).arg("--version").output().ok()); - let version = otry!(str::from_utf8(&output.stdout).ok()); +struct RustcVersion { + minor: u32, + nightly: bool, +} + +fn rustc_version() -> Option { + let rustc = env::var_os("RUSTC")?; + let output = Command::new(rustc).arg("--version").output().ok()?; + let version = str::from_utf8(&output.stdout).ok()?; + let nightly = version.contains("nightly") || version.contains("dev"); let mut pieces = version.split('.'); if pieces.next() != Some("rustc 1") { return None; } - otry!(pieces.next()).parse().ok() + let minor = pieces.next()?.parse().ok()?; + Some(RustcVersion { minor, nightly }) +} + +fn feature_allowed(feature: &str) -> bool { + // Recognized formats: + // + // -Z allow-features=feature1,feature2 + // + // -Zallow-features=feature1,feature2 + + if let Some(rustflags) = env::var_os("RUSTFLAGS") { + for mut flag in rustflags.to_string_lossy().split(' ') { + if flag.starts_with("-Z") { + flag = &flag["-Z".len()..]; + } + if flag.starts_with("allow-features=") { + flag = &flag["allow-features=".len()..]; + return flag.split(',').any(|allowed| allowed == feature); + } + } + } + + // No allow-features= flag, allowed by default. 
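+    // (For example, RUSTFLAGS="-Zallow-features=proc_macro_span" would make
+    // the loop above return true only for that feature, while an empty list,
+    // "-Zallow-features=", disallows every unstable feature.)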
+    true
+}
--- /dev/null
+++ b/vendor/proc-macro2/src/fallback.rs
@@ -0,0 +1,1434 @@
+#[cfg(span_locations)]
+use std::cell::RefCell;
+#[cfg(span_locations)]
+use std::cmp;
+use std::fmt;
+use std::iter;
+use std::ops::RangeBounds;
+#[cfg(procmacro2_semver_exempt)]
+use std::path::Path;
+use std::path::PathBuf;
+use std::str::FromStr;
+use std::vec;
+
+use crate::strnom::{block_comment, skip_whitespace, whitespace, word_break, Cursor, PResult};
+use crate::{Delimiter, Punct, Spacing, TokenTree};
+use unicode_xid::UnicodeXID;
+
+#[derive(Clone)]
+pub struct TokenStream {
+    inner: Vec<TokenTree>,
+}
+
+#[derive(Debug)]
+pub struct LexError;
+
+impl TokenStream {
+    pub fn new() -> TokenStream {
+        TokenStream { inner: Vec::new() }
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.inner.len() == 0
+    }
+}
+
+#[cfg(span_locations)]
+fn get_cursor(src: &str) -> Cursor {
+    // Create a dummy file & add it to the source map
+    SOURCE_MAP.with(|cm| {
+        let mut cm = cm.borrow_mut();
+        let name = format!("<parsed string {}>", cm.files.len());
+        let span = cm.add_file(&name, src);
+        Cursor {
+            rest: src,
+            off: span.lo,
+        }
+    })
+}
+
+#[cfg(not(span_locations))]
+fn get_cursor(src: &str) -> Cursor {
+    Cursor { rest: src }
+}
+
+impl FromStr for TokenStream {
+    type Err = LexError;
+
+    fn from_str(src: &str) -> Result<TokenStream, LexError> {
+        // Create a dummy file & add it to the source map
+        let cursor = get_cursor(src);
+
+        match token_stream(cursor) {
+            Ok((input, output)) => {
+                if skip_whitespace(input).len() != 0 {
+                    Err(LexError)
+                } else {
+                    Ok(output)
+                }
+            }
+            Err(LexError) => Err(LexError),
+        }
+    }
+}
+
+impl fmt::Display for TokenStream {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let mut joint = false;
+        for (i, tt) in self.inner.iter().enumerate() {
+            if i != 0 && !joint {
+                write!(f, " ")?;
+            }
+            joint = false;
+            match *tt {
+                TokenTree::Group(ref tt) => {
+                    let (start, end) = match tt.delimiter() {
+                        Delimiter::Parenthesis => ("(", ")"),
+                        Delimiter::Brace => ("{", "}"),
+                        Delimiter::Bracket => ("[", "]"),
+                        Delimiter::None => ("", ""),
+                    };
+                    if tt.stream().into_iter().next().is_none() {
+                        write!(f, "{} {}", start, end)?
+                    } else {
+                        write!(f, "{} {} {}", start, tt.stream(), end)?
+                    }
+                }
+                TokenTree::Ident(ref tt) => write!(f, "{}", tt)?,
+                TokenTree::Punct(ref tt) => {
+                    write!(f, "{}", tt.as_char())?;
+                    match tt.spacing() {
+                        Spacing::Alone => {}
+                        Spacing::Joint => joint = true,
+                    }
+                }
+                TokenTree::Literal(ref tt) => write!(f, "{}", tt)?,
+            }
+        }
+
+        Ok(())
+    }
+}
+
+impl fmt::Debug for TokenStream {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.write_str("TokenStream ")?;
+        f.debug_list().entries(self.clone()).finish()
+    }
+}
+
+#[cfg(use_proc_macro)]
+impl From<proc_macro::TokenStream> for TokenStream {
+    fn from(inner: proc_macro::TokenStream) -> TokenStream {
+        inner
+            .to_string()
+            .parse()
+            .expect("compiler token stream parse failed")
+    }
+}
+
+#[cfg(use_proc_macro)]
+impl From<TokenStream> for proc_macro::TokenStream {
+    fn from(inner: TokenStream) -> proc_macro::TokenStream {
+        inner
+            .to_string()
+            .parse()
+            .expect("failed to parse to compiler tokens")
+    }
+}
+
+impl From<TokenTree> for TokenStream {
+    fn from(tree: TokenTree) -> TokenStream {
+        TokenStream { inner: vec![tree] }
+    }
+}
+
+impl iter::FromIterator<TokenTree> for TokenStream {
+    fn from_iter<I: IntoIterator<Item = TokenTree>>(streams: I) -> Self {
+        let mut v = Vec::new();
+
+        for token in streams.into_iter() {
+            v.push(token);
+        }
+
+        TokenStream { inner: v }
+    }
+}
+
+impl iter::FromIterator<TokenStream> for TokenStream {
+    fn from_iter<I: IntoIterator<Item = TokenStream>>(streams: I) -> Self {
+        let mut v = Vec::new();
+
+        for stream in streams.into_iter() {
+            v.extend(stream.inner);
+        }
+
+        TokenStream { inner: v }
+    }
+}
+
+impl Extend<TokenTree> for TokenStream {
+    fn extend<I: IntoIterator<Item = TokenTree>>(&mut self, streams: I) {
+        self.inner.extend(streams);
+    }
+}
+
+impl Extend<TokenStream> for TokenStream {
+    fn extend<I: IntoIterator<Item = TokenStream>>(&mut self, streams: I) {
+        self.inner
+            .extend(streams.into_iter().flat_map(|stream| stream));
+    }
+}
+
+pub type TokenTreeIter = vec::IntoIter<TokenTree>;
+
+impl IntoIterator for TokenStream {
+    type Item = TokenTree;
+    type IntoIter = TokenTreeIter;
+
+    fn into_iter(self) -> TokenTreeIter {
+        self.inner.into_iter()
+    }
+}
+
+#[derive(Clone, PartialEq, Eq)]
+pub struct SourceFile {
+    path: PathBuf,
+}
+
+impl SourceFile {
+    /// Get the path to this source file as a string.
+    pub fn path(&self) -> PathBuf {
+        self.path.clone()
+    }
+
+    pub fn is_real(&self) -> bool {
+        // XXX(nika): Support real files in the future?
+        false
+    }
+}
+
+impl fmt::Debug for SourceFile {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("SourceFile")
+            .field("path", &self.path())
+            .field("is_real", &self.is_real())
+            .finish()
+    }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct LineColumn {
+    pub line: usize,
+    pub column: usize,
+}
+
+#[cfg(span_locations)]
+thread_local! {
+    static SOURCE_MAP: RefCell<SourceMap> = RefCell::new(SourceMap {
+        // NOTE: We start with a single dummy file which all call_site() and
+        // def_site() spans reference.
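+        // (call_site() and def_site() are Span { lo: 0, hi: 0 }, which lies
+        // inside this dummy file's range, so fileinfo() lookups for default
+        // spans always succeed.)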
+        files: vec![{
+            #[cfg(procmacro2_semver_exempt)]
+            {
+                FileInfo {
+                    name: "<unspecified>".to_owned(),
+                    span: Span { lo: 0, hi: 0 },
+                    lines: vec![0],
+                }
+            }
+
+            #[cfg(not(procmacro2_semver_exempt))]
+            {
+                FileInfo {
+                    span: Span { lo: 0, hi: 0 },
+                    lines: vec![0],
+                }
+            }
+        }],
+    });
+}
+
+#[cfg(span_locations)]
+struct FileInfo {
+    #[cfg(procmacro2_semver_exempt)]
+    name: String,
+    span: Span,
+    lines: Vec<usize>,
+}
+
+#[cfg(span_locations)]
+impl FileInfo {
+    fn offset_line_column(&self, offset: usize) -> LineColumn {
+        assert!(self.span_within(Span {
+            lo: offset as u32,
+            hi: offset as u32
+        }));
+        let offset = offset - self.span.lo as usize;
+        match self.lines.binary_search(&offset) {
+            Ok(found) => LineColumn {
+                line: found + 1,
+                column: 0,
+            },
+            Err(idx) => LineColumn {
+                line: idx,
+                column: offset - self.lines[idx - 1],
+            },
+        }
+    }
+
+    fn span_within(&self, span: Span) -> bool {
+        span.lo >= self.span.lo && span.hi <= self.span.hi
+    }
+}
+
+/// Computes the offsets of each line in the given source string.
+#[cfg(span_locations)]
+fn lines_offsets(s: &str) -> Vec<usize> {
+    let mut lines = vec![0];
+    let mut prev = 0;
+    while let Some(len) = s[prev..].find('\n') {
+        prev += len + 1;
+        lines.push(prev);
+    }
+    lines
+}
+
+#[cfg(span_locations)]
+struct SourceMap {
+    files: Vec<FileInfo>,
+}
+
+#[cfg(span_locations)]
+impl SourceMap {
+    fn next_start_pos(&self) -> u32 {
+        // Add 1 so there's always space between files.
+        //
+        // We'll always have at least 1 file, as we initialize our files list
+        // with a dummy file.
+        self.files.last().unwrap().span.hi + 1
+    }
+
+    fn add_file(&mut self, name: &str, src: &str) -> Span {
+        let lines = lines_offsets(src);
+        let lo = self.next_start_pos();
+        // XXX(nika): Should we bother doing a checked cast or checked add here?
+        let span = Span {
+            lo,
+            hi: lo + (src.len() as u32),
+        };
+
+        #[cfg(procmacro2_semver_exempt)]
+        self.files.push(FileInfo {
+            name: name.to_owned(),
+            span,
+            lines,
+        });
+
+        #[cfg(not(procmacro2_semver_exempt))]
+        self.files.push(FileInfo { span, lines });
+        let _ = name;
+
+        span
+    }
+
+    fn fileinfo(&self, span: Span) -> &FileInfo {
+        for file in &self.files {
+            if file.span_within(span) {
+                return file;
+            }
+        }
+        panic!("Invalid span with no related FileInfo!");
+    }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub struct Span {
+    #[cfg(span_locations)]
+    lo: u32,
+    #[cfg(span_locations)]
+    hi: u32,
+}
+
+impl Span {
+    #[cfg(not(span_locations))]
+    pub fn call_site() -> Span {
+        Span {}
+    }
+
+    #[cfg(span_locations)]
+    pub fn call_site() -> Span {
+        Span { lo: 0, hi: 0 }
+    }
+
+    #[cfg(procmacro2_semver_exempt)]
+    pub fn def_site() -> Span {
+        Span::call_site()
+    }
+
+    #[cfg(procmacro2_semver_exempt)]
+    pub fn resolved_at(&self, _other: Span) -> Span {
+        // Stable spans consist only of line/column information, so
+        // `resolved_at` and `located_at` only select which span the
+        // caller wants line/column information from.
+ *self + } + + #[cfg(procmacro2_semver_exempt)] + pub fn located_at(&self, other: Span) -> Span { + other + } + + #[cfg(procmacro2_semver_exempt)] + pub fn source_file(&self) -> SourceFile { + SOURCE_MAP.with(|cm| { + let cm = cm.borrow(); + let fi = cm.fileinfo(*self); + SourceFile { + path: Path::new(&fi.name).to_owned(), + } + }) + } + + #[cfg(span_locations)] + pub fn start(&self) -> LineColumn { + SOURCE_MAP.with(|cm| { + let cm = cm.borrow(); + let fi = cm.fileinfo(*self); + fi.offset_line_column(self.lo as usize) + }) + } + + #[cfg(span_locations)] + pub fn end(&self) -> LineColumn { + SOURCE_MAP.with(|cm| { + let cm = cm.borrow(); + let fi = cm.fileinfo(*self); + fi.offset_line_column(self.hi as usize) + }) + } + + #[cfg(not(span_locations))] + pub fn join(&self, _other: Span) -> Option { + Some(Span {}) + } + + #[cfg(span_locations)] + pub fn join(&self, other: Span) -> Option { + SOURCE_MAP.with(|cm| { + let cm = cm.borrow(); + // If `other` is not within the same FileInfo as us, return None. + if !cm.fileinfo(*self).span_within(other) { + return None; + } + Some(Span { + lo: cmp::min(self.lo, other.lo), + hi: cmp::max(self.hi, other.hi), + }) + }) + } +} + +impl fmt::Debug for Span { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + #[cfg(procmacro2_semver_exempt)] + return write!(f, "bytes({}..{})", self.lo, self.hi); + + #[cfg(not(procmacro2_semver_exempt))] + write!(f, "Span") + } +} + +pub fn debug_span_field_if_nontrivial(debug: &mut fmt::DebugStruct, span: Span) { + if cfg!(procmacro2_semver_exempt) { + debug.field("span", &span); + } +} + +#[derive(Clone)] +pub struct Group { + delimiter: Delimiter, + stream: TokenStream, + span: Span, +} + +impl Group { + pub fn new(delimiter: Delimiter, stream: TokenStream) -> Group { + Group { + delimiter, + stream, + span: Span::call_site(), + } + } + + pub fn delimiter(&self) -> Delimiter { + self.delimiter + } + + pub fn stream(&self) -> TokenStream { + self.stream.clone() + } + + pub fn span(&self) -> Span { + self.span + } + + pub fn span_open(&self) -> Span { + self.span + } + + pub fn span_close(&self) -> Span { + self.span + } + + pub fn set_span(&mut self, span: Span) { + self.span = span; + } +} + +impl fmt::Display for Group { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let (left, right) = match self.delimiter { + Delimiter::Parenthesis => ("(", ")"), + Delimiter::Brace => ("{", "}"), + Delimiter::Bracket => ("[", "]"), + Delimiter::None => ("", ""), + }; + + f.write_str(left)?; + self.stream.fmt(f)?; + f.write_str(right)?; + + Ok(()) + } +} + +impl fmt::Debug for Group { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let mut debug = fmt.debug_struct("Group"); + debug.field("delimiter", &self.delimiter); + debug.field("stream", &self.stream); + #[cfg(procmacro2_semver_exempt)] + debug.field("span", &self.span); + debug.finish() + } +} + +#[derive(Clone)] +pub struct Ident { + sym: String, + span: Span, + raw: bool, +} + +impl Ident { + fn _new(string: &str, raw: bool, span: Span) -> Ident { + validate_ident(string); + + Ident { + sym: string.to_owned(), + span, + raw, + } + } + + pub fn new(string: &str, span: Span) -> Ident { + Ident::_new(string, false, span) + } + + pub fn new_raw(string: &str, span: Span) -> Ident { + Ident::_new(string, true, span) + } + + pub fn span(&self) -> Span { + self.span + } + + pub fn set_span(&mut self, span: Span) { + self.span = span; + } +} + +#[inline] +fn is_ident_start(c: char) -> bool { + ('a' <= c && c <= 'z') + || ('A' <= c && c <= 'Z') + || c 
== '_' + || (c > '\x7f' && UnicodeXID::is_xid_start(c)) +} + +#[inline] +fn is_ident_continue(c: char) -> bool { + ('a' <= c && c <= 'z') + || ('A' <= c && c <= 'Z') + || c == '_' + || ('0' <= c && c <= '9') + || (c > '\x7f' && UnicodeXID::is_xid_continue(c)) +} + +fn validate_ident(string: &str) { + let validate = string; + if validate.is_empty() { + panic!("Ident is not allowed to be empty; use Option"); + } + + if validate.bytes().all(|digit| digit >= b'0' && digit <= b'9') { + panic!("Ident cannot be a number; use Literal instead"); + } + + fn ident_ok(string: &str) -> bool { + let mut chars = string.chars(); + let first = chars.next().unwrap(); + if !is_ident_start(first) { + return false; + } + for ch in chars { + if !is_ident_continue(ch) { + return false; + } + } + true + } + + if !ident_ok(validate) { + panic!("{:?} is not a valid Ident", string); + } +} + +impl PartialEq for Ident { + fn eq(&self, other: &Ident) -> bool { + self.sym == other.sym && self.raw == other.raw + } +} + +impl PartialEq for Ident +where + T: ?Sized + AsRef, +{ + fn eq(&self, other: &T) -> bool { + let other = other.as_ref(); + if self.raw { + other.starts_with("r#") && self.sym == other[2..] + } else { + self.sym == other + } + } +} + +impl fmt::Display for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if self.raw { + "r#".fmt(f)?; + } + self.sym.fmt(f) + } +} + +impl fmt::Debug for Ident { + // Ident(proc_macro), Ident(r#union) + #[cfg(not(procmacro2_semver_exempt))] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut debug = f.debug_tuple("Ident"); + debug.field(&format_args!("{}", self)); + debug.finish() + } + + // Ident { + // sym: proc_macro, + // span: bytes(128..138) + // } + #[cfg(procmacro2_semver_exempt)] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut debug = f.debug_struct("Ident"); + debug.field("sym", &format_args!("{}", self)); + debug.field("span", &self.span); + debug.finish() + } +} + +#[derive(Clone)] +pub struct Literal { + text: String, + span: Span, +} + +macro_rules! suffixed_numbers { + ($($name:ident => $kind:ident,)*) => ($( + pub fn $name(n: $kind) -> Literal { + Literal::_new(format!(concat!("{}", stringify!($kind)), n)) + } + )*) +} + +macro_rules! unsuffixed_numbers { + ($($name:ident => $kind:ident,)*) => ($( + pub fn $name(n: $kind) -> Literal { + Literal::_new(n.to_string()) + } + )*) +} + +impl Literal { + fn _new(text: String) -> Literal { + Literal { + text, + span: Span::call_site(), + } + } + + suffixed_numbers! { + u8_suffixed => u8, + u16_suffixed => u16, + u32_suffixed => u32, + u64_suffixed => u64, + u128_suffixed => u128, + usize_suffixed => usize, + i8_suffixed => i8, + i16_suffixed => i16, + i32_suffixed => i32, + i64_suffixed => i64, + i128_suffixed => i128, + isize_suffixed => isize, + + f32_suffixed => f32, + f64_suffixed => f64, + } + + unsuffixed_numbers! 
{ + u8_unsuffixed => u8, + u16_unsuffixed => u16, + u32_unsuffixed => u32, + u64_unsuffixed => u64, + u128_unsuffixed => u128, + usize_unsuffixed => usize, + i8_unsuffixed => i8, + i16_unsuffixed => i16, + i32_unsuffixed => i32, + i64_unsuffixed => i64, + i128_unsuffixed => i128, + isize_unsuffixed => isize, + } + + pub fn f32_unsuffixed(f: f32) -> Literal { + let mut s = f.to_string(); + if !s.contains(".") { + s.push_str(".0"); + } + Literal::_new(s) + } + + pub fn f64_unsuffixed(f: f64) -> Literal { + let mut s = f.to_string(); + if !s.contains(".") { + s.push_str(".0"); + } + Literal::_new(s) + } + + pub fn string(t: &str) -> Literal { + let mut text = String::with_capacity(t.len() + 2); + text.push('"'); + for c in t.chars() { + if c == '\'' { + // escape_default turns this into "\'" which is unnecessary. + text.push(c); + } else { + text.extend(c.escape_default()); + } + } + text.push('"'); + Literal::_new(text) + } + + pub fn character(t: char) -> Literal { + let mut text = String::new(); + text.push('\''); + if t == '"' { + // escape_default turns this into '\"' which is unnecessary. + text.push(t); + } else { + text.extend(t.escape_default()); + } + text.push('\''); + Literal::_new(text) + } + + pub fn byte_string(bytes: &[u8]) -> Literal { + let mut escaped = "b\"".to_string(); + for b in bytes { + match *b { + b'\0' => escaped.push_str(r"\0"), + b'\t' => escaped.push_str(r"\t"), + b'\n' => escaped.push_str(r"\n"), + b'\r' => escaped.push_str(r"\r"), + b'"' => escaped.push_str("\\\""), + b'\\' => escaped.push_str("\\\\"), + b'\x20'..=b'\x7E' => escaped.push(*b as char), + _ => escaped.push_str(&format!("\\x{:02X}", b)), + } + } + escaped.push('"'); + Literal::_new(escaped) + } + + pub fn span(&self) -> Span { + self.span + } + + pub fn set_span(&mut self, span: Span) { + self.span = span; + } + + pub fn subspan>(&self, _range: R) -> Option { + None + } +} + +impl fmt::Display for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.text.fmt(f) + } +} + +impl fmt::Debug for Literal { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let mut debug = fmt.debug_struct("Literal"); + debug.field("lit", &format_args!("{}", self.text)); + #[cfg(procmacro2_semver_exempt)] + debug.field("span", &self.span); + debug.finish() + } +} + +fn token_stream(mut input: Cursor) -> PResult { + let mut trees = Vec::new(); + loop { + let input_no_ws = skip_whitespace(input); + if input_no_ws.rest.len() == 0 { + break; + } + if let Ok((a, tokens)) = doc_comment(input_no_ws) { + input = a; + trees.extend(tokens); + continue; + } + + let (a, tt) = match token_tree(input_no_ws) { + Ok(p) => p, + Err(_) => break, + }; + trees.push(tt); + input = a; + } + Ok((input, TokenStream { inner: trees })) +} + +#[cfg(not(span_locations))] +fn spanned<'a, T>( + input: Cursor<'a>, + f: fn(Cursor<'a>) -> PResult<'a, T>, +) -> PResult<'a, (T, crate::Span)> { + let (a, b) = f(skip_whitespace(input))?; + Ok((a, ((b, crate::Span::_new_stable(Span::call_site()))))) +} + +#[cfg(span_locations)] +fn spanned<'a, T>( + input: Cursor<'a>, + f: fn(Cursor<'a>) -> PResult<'a, T>, +) -> PResult<'a, (T, crate::Span)> { + let input = skip_whitespace(input); + let lo = input.off; + let (a, b) = f(input)?; + let hi = a.off; + let span = crate::Span::_new_stable(Span { lo, hi }); + Ok((a, (b, span))) +} + +fn token_tree(input: Cursor) -> PResult { + let (rest, (mut tt, span)) = spanned(input, token_kind)?; + tt.set_span(span); + Ok((rest, tt)) +} + +named!(token_kind -> TokenTree, alt!( + map!(group, |g| 
TokenTree::Group(crate::Group::_new_stable(g))) + | + map!(literal, |l| TokenTree::Literal(crate::Literal::_new_stable(l))) // must be before symbol + | + map!(op, TokenTree::Punct) + | + symbol_leading_ws +)); + +named!(group -> Group, alt!( + delimited!( + punct!("("), + token_stream, + punct!(")") + ) => { |ts| Group::new(Delimiter::Parenthesis, ts) } + | + delimited!( + punct!("["), + token_stream, + punct!("]") + ) => { |ts| Group::new(Delimiter::Bracket, ts) } + | + delimited!( + punct!("{"), + token_stream, + punct!("}") + ) => { |ts| Group::new(Delimiter::Brace, ts) } +)); + +fn symbol_leading_ws(input: Cursor) -> PResult { + symbol(skip_whitespace(input)) +} + +fn symbol(input: Cursor) -> PResult { + let raw = input.starts_with("r#"); + let rest = input.advance((raw as usize) << 1); + + let (rest, sym) = symbol_not_raw(rest)?; + + if !raw { + let ident = crate::Ident::new(sym, crate::Span::call_site()); + return Ok((rest, ident.into())); + } + + if sym == "_" { + return Err(LexError); + } + + let ident = crate::Ident::_new_raw(sym, crate::Span::call_site()); + Ok((rest, ident.into())) +} + +fn symbol_not_raw(input: Cursor) -> PResult<&str> { + let mut chars = input.char_indices(); + + match chars.next() { + Some((_, ch)) if is_ident_start(ch) => {} + _ => return Err(LexError), + } + + let mut end = input.len(); + for (i, ch) in chars { + if !is_ident_continue(ch) { + end = i; + break; + } + } + + Ok((input.advance(end), &input.rest[..end])) +} + +fn literal(input: Cursor) -> PResult { + let input_no_ws = skip_whitespace(input); + + match literal_nocapture(input_no_ws) { + Ok((a, ())) => { + let start = input.len() - input_no_ws.len(); + let len = input_no_ws.len() - a.len(); + let end = start + len; + Ok((a, Literal::_new(input.rest[start..end].to_string()))) + } + Err(LexError) => Err(LexError), + } +} + +named!(literal_nocapture -> (), alt!( + string + | + byte_string + | + byte + | + character + | + float + | + int +)); + +named!(string -> (), alt!( + quoted_string + | + preceded!( + punct!("r"), + raw_string + ) => { |_| () } +)); + +named!(quoted_string -> (), do_parse!( + punct!("\"") >> + cooked_string >> + tag!("\"") >> + option!(symbol_not_raw) >> + (()) +)); + +fn cooked_string(input: Cursor) -> PResult<()> { + let mut chars = input.char_indices().peekable(); + while let Some((byte_offset, ch)) = chars.next() { + match ch { + '"' => { + return Ok((input.advance(byte_offset), ())); + } + '\r' => { + if let Some((_, '\n')) = chars.next() { + // ... + } else { + break; + } + } + '\\' => match chars.next() { + Some((_, 'x')) => { + if !backslash_x_char(&mut chars) { + break; + } + } + Some((_, 'n')) | Some((_, 'r')) | Some((_, 't')) | Some((_, '\\')) + | Some((_, '\'')) | Some((_, '"')) | Some((_, '0')) => {} + Some((_, 'u')) => { + if !backslash_u(&mut chars) { + break; + } + } + Some((_, '\n')) | Some((_, '\r')) => { + while let Some(&(_, ch)) = chars.peek() { + if ch.is_whitespace() { + chars.next(); + } else { + break; + } + } + } + _ => break, + }, + _ch => {} + } + } + Err(LexError) +} + +named!(byte_string -> (), alt!( + delimited!( + punct!("b\""), + cooked_byte_string, + tag!("\"") + ) => { |_| () } + | + preceded!( + punct!("br"), + raw_string + ) => { |_| () } +)); + +fn cooked_byte_string(mut input: Cursor) -> PResult<()> { + let mut bytes = input.bytes().enumerate(); + 'outer: while let Some((offset, b)) = bytes.next() { + match b { + b'"' => { + return Ok((input.advance(offset), ())); + } + b'\r' => { + if let Some((_, b'\n')) = bytes.next() { + // ... 
+ } else { + break; + } + } + b'\\' => match bytes.next() { + Some((_, b'x')) => { + if !backslash_x_byte(&mut bytes) { + break; + } + } + Some((_, b'n')) | Some((_, b'r')) | Some((_, b't')) | Some((_, b'\\')) + | Some((_, b'0')) | Some((_, b'\'')) | Some((_, b'"')) => {} + Some((newline, b'\n')) | Some((newline, b'\r')) => { + let rest = input.advance(newline + 1); + for (offset, ch) in rest.char_indices() { + if !ch.is_whitespace() { + input = rest.advance(offset); + bytes = input.bytes().enumerate(); + continue 'outer; + } + } + break; + } + _ => break, + }, + b if b < 0x80 => {} + _ => break, + } + } + Err(LexError) +} + +fn raw_string(input: Cursor) -> PResult<()> { + let mut chars = input.char_indices(); + let mut n = 0; + while let Some((byte_offset, ch)) = chars.next() { + match ch { + '"' => { + n = byte_offset; + break; + } + '#' => {} + _ => return Err(LexError), + } + } + for (byte_offset, ch) in chars { + match ch { + '"' if input.advance(byte_offset + 1).starts_with(&input.rest[..n]) => { + let rest = input.advance(byte_offset + 1 + n); + return Ok((rest, ())); + } + '\r' => {} + _ => {} + } + } + Err(LexError) +} + +named!(byte -> (), do_parse!( + punct!("b") >> + tag!("'") >> + cooked_byte >> + tag!("'") >> + (()) +)); + +fn cooked_byte(input: Cursor) -> PResult<()> { + let mut bytes = input.bytes().enumerate(); + let ok = match bytes.next().map(|(_, b)| b) { + Some(b'\\') => match bytes.next().map(|(_, b)| b) { + Some(b'x') => backslash_x_byte(&mut bytes), + Some(b'n') | Some(b'r') | Some(b't') | Some(b'\\') | Some(b'0') | Some(b'\'') + | Some(b'"') => true, + _ => false, + }, + b => b.is_some(), + }; + if ok { + match bytes.next() { + Some((offset, _)) => { + if input.chars().as_str().is_char_boundary(offset) { + Ok((input.advance(offset), ())) + } else { + Err(LexError) + } + } + None => Ok((input.advance(input.len()), ())), + } + } else { + Err(LexError) + } +} + +named!(character -> (), do_parse!( + punct!("'") >> + cooked_char >> + tag!("'") >> + (()) +)); + +fn cooked_char(input: Cursor) -> PResult<()> { + let mut chars = input.char_indices(); + let ok = match chars.next().map(|(_, ch)| ch) { + Some('\\') => match chars.next().map(|(_, ch)| ch) { + Some('x') => backslash_x_char(&mut chars), + Some('u') => backslash_u(&mut chars), + Some('n') | Some('r') | Some('t') | Some('\\') | Some('0') | Some('\'') | Some('"') => { + true + } + _ => false, + }, + ch => ch.is_some(), + }; + if ok { + match chars.next() { + Some((idx, _)) => Ok((input.advance(idx), ())), + None => Ok((input.advance(input.len()), ())), + } + } else { + Err(LexError) + } +} + +macro_rules! 
next_ch { + ($chars:ident @ $pat:pat $(| $rest:pat)*) => { + match $chars.next() { + Some((_, ch)) => match ch { + $pat $(| $rest)* => ch, + _ => return false, + }, + None => return false + } + }; +} + +fn backslash_x_char(chars: &mut I) -> bool +where + I: Iterator, +{ + next_ch!(chars @ '0'..='7'); + next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F'); + true +} + +fn backslash_x_byte(chars: &mut I) -> bool +where + I: Iterator, +{ + next_ch!(chars @ b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F'); + next_ch!(chars @ b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F'); + true +} + +fn backslash_u(chars: &mut I) -> bool +where + I: Iterator, +{ + next_ch!(chars @ '{'); + next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F'); + loop { + let c = next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F' | '_' | '}'); + if c == '}' { + return true; + } + } +} + +fn float(input: Cursor) -> PResult<()> { + let (mut rest, ()) = float_digits(input)?; + if let Some(ch) = rest.chars().next() { + if is_ident_start(ch) { + rest = symbol_not_raw(rest)?.0; + } + } + word_break(rest) +} + +fn float_digits(input: Cursor) -> PResult<()> { + let mut chars = input.chars().peekable(); + match chars.next() { + Some(ch) if ch >= '0' && ch <= '9' => {} + _ => return Err(LexError), + } + + let mut len = 1; + let mut has_dot = false; + let mut has_exp = false; + while let Some(&ch) = chars.peek() { + match ch { + '0'..='9' | '_' => { + chars.next(); + len += 1; + } + '.' => { + if has_dot { + break; + } + chars.next(); + if chars + .peek() + .map(|&ch| ch == '.' || is_ident_start(ch)) + .unwrap_or(false) + { + return Err(LexError); + } + len += 1; + has_dot = true; + } + 'e' | 'E' => { + chars.next(); + len += 1; + has_exp = true; + break; + } + _ => break, + } + } + + let rest = input.advance(len); + if !(has_dot || has_exp || rest.starts_with("f32") || rest.starts_with("f64")) { + return Err(LexError); + } + + if has_exp { + let mut has_exp_value = false; + while let Some(&ch) = chars.peek() { + match ch { + '+' | '-' => { + if has_exp_value { + break; + } + chars.next(); + len += 1; + } + '0'..='9' => { + chars.next(); + len += 1; + has_exp_value = true; + } + '_' => { + chars.next(); + len += 1; + } + _ => break, + } + } + if !has_exp_value { + return Err(LexError); + } + } + + Ok((input.advance(len), ())) +} + +fn int(input: Cursor) -> PResult<()> { + let (mut rest, ()) = digits(input)?; + if let Some(ch) = rest.chars().next() { + if is_ident_start(ch) { + rest = symbol_not_raw(rest)?.0; + } + } + word_break(rest) +} + +fn digits(mut input: Cursor) -> PResult<()> { + let base = if input.starts_with("0x") { + input = input.advance(2); + 16 + } else if input.starts_with("0o") { + input = input.advance(2); + 8 + } else if input.starts_with("0b") { + input = input.advance(2); + 2 + } else { + 10 + }; + + let mut len = 0; + let mut empty = true; + for b in input.bytes() { + let digit = match b { + b'0'..=b'9' => (b - b'0') as u64, + b'a'..=b'f' => 10 + (b - b'a') as u64, + b'A'..=b'F' => 10 + (b - b'A') as u64, + b'_' => { + if empty && base == 10 { + return Err(LexError); + } + len += 1; + continue; + } + _ => break, + }; + if digit >= base { + return Err(LexError); + } + len += 1; + empty = false; + } + if empty { + Err(LexError) + } else { + Ok((input.advance(len), ())) + } +} + +fn op(input: Cursor) -> PResult { + let input = skip_whitespace(input); + match op_char(input) { + Ok((rest, '\'')) => { + symbol(rest)?; + Ok((rest, Punct::new('\'', Spacing::Joint))) + } + Ok((rest, ch)) => { + let kind = match op_char(rest) { + Ok(_) => 
Spacing::Joint, + Err(LexError) => Spacing::Alone, + }; + Ok((rest, Punct::new(ch, kind))) + } + Err(LexError) => Err(LexError), + } +} + +fn op_char(input: Cursor) -> PResult { + if input.starts_with("//") || input.starts_with("/*") { + // Do not accept `/` of a comment as an op. + return Err(LexError); + } + + let mut chars = input.chars(); + let first = match chars.next() { + Some(ch) => ch, + None => { + return Err(LexError); + } + }; + let recognized = "~!@#$%^&*-=+|;:,<.>/?'"; + if recognized.contains(first) { + Ok((input.advance(first.len_utf8()), first)) + } else { + Err(LexError) + } +} + +fn doc_comment(input: Cursor) -> PResult> { + let mut trees = Vec::new(); + let (rest, ((comment, inner), span)) = spanned(input, doc_comment_contents)?; + trees.push(TokenTree::Punct(Punct::new('#', Spacing::Alone))); + if inner { + trees.push(Punct::new('!', Spacing::Alone).into()); + } + let mut stream = vec![ + TokenTree::Ident(crate::Ident::new("doc", span)), + TokenTree::Punct(Punct::new('=', Spacing::Alone)), + TokenTree::Literal(crate::Literal::string(comment)), + ]; + for tt in stream.iter_mut() { + tt.set_span(span); + } + let group = Group::new(Delimiter::Bracket, stream.into_iter().collect()); + trees.push(crate::Group::_new_stable(group).into()); + for tt in trees.iter_mut() { + tt.set_span(span); + } + Ok((rest, trees)) +} + +named!(doc_comment_contents -> (&str, bool), alt!( + do_parse!( + punct!("//!") >> + s: take_until_newline_or_eof!() >> + ((s, true)) + ) + | + do_parse!( + option!(whitespace) >> + peek!(tag!("/*!")) >> + s: block_comment >> + ((s, true)) + ) + | + do_parse!( + punct!("///") >> + not!(tag!("/")) >> + s: take_until_newline_or_eof!() >> + ((s, false)) + ) + | + do_parse!( + option!(whitespace) >> + peek!(tuple!(tag!("/**"), not!(tag!("*")))) >> + s: block_comment >> + ((s, false)) + ) +)); --- a/vendor/proc-macro2/src/lib.rs +++ b/vendor/proc-macro2/src/lib.rs @@ -1,35 +1,65 @@ -//! A "shim crate" intended to multiplex the [`proc_macro`] API on to stable -//! Rust. +//! A wrapper around the procedural macro API of the compiler's [`proc_macro`] +//! crate. This library serves two purposes: //! -//! Procedural macros in Rust operate over the upstream -//! [`proc_macro::TokenStream`][ts] type. This type currently is quite -//! conservative and exposed no internal implementation details. Nightly -//! compilers, however, contain a much richer interface. This richer interface -//! allows fine-grained inspection of the token stream which avoids -//! stringification/re-lexing and also preserves span information. +//! [`proc_macro`]: https://doc.rust-lang.org/proc_macro/ +//! +//! - **Bring proc-macro-like functionality to other contexts like build.rs and +//! main.rs.** Types from `proc_macro` are entirely specific to procedural +//! macros and cannot ever exist in code outside of a procedural macro. +//! Meanwhile `proc_macro2` types may exist anywhere including non-macro code. +//! By developing foundational libraries like [syn] and [quote] against +//! `proc_macro2` rather than `proc_macro`, the procedural macro ecosystem +//! becomes easily applicable to many other use cases and we avoid +//! reimplementing non-macro equivalents of those libraries. +//! +//! - **Make procedural macros unit testable.** As a consequence of being +//! specific to procedural macros, nothing that uses `proc_macro` can be +//! executed from a unit test. In order for helper libraries or components of +//! a macro to be testable in isolation, they must be implemented using +//! 
`proc_macro2`. +//! +//! [syn]: https://github.com/dtolnay/syn +//! [quote]: https://github.com/dtolnay/quote +//! +//! # Usage +//! +//! The skeleton of a typical procedural macro typically looks like this: +//! +//! ``` +//! extern crate proc_macro; +//! +//! # const IGNORE: &str = stringify! { +//! #[proc_macro_derive(MyDerive)] +//! # }; +//! # #[cfg(wrap_proc_macro)] +//! pub fn my_derive(input: proc_macro::TokenStream) -> proc_macro::TokenStream { +//! let input = proc_macro2::TokenStream::from(input); //! -//! The upcoming APIs added to [`proc_macro`] upstream are the foundation for -//! productive procedural macros in the ecosystem. To help prepare the ecosystem -//! for using them this crate serves to both compile on stable and nightly and -//! mirrors the API-to-be. The intention is that procedural macros which switch -//! to use this crate will be trivially able to switch to the upstream -//! `proc_macro` crate once its API stabilizes. +//! let output: proc_macro2::TokenStream = { +//! /* transform input */ +//! # input +//! }; +//! +//! proc_macro::TokenStream::from(output) +//! } +//! ``` //! -//! In the meantime this crate also has a `nightly` Cargo feature which -//! enables it to reimplement itself with the unstable API of [`proc_macro`]. -//! This'll allow immediate usage of the beneficial upstream API, particularly -//! around preserving span information. +//! If parsing with [Syn], you'll use [`parse_macro_input!`] instead to +//! propagate parse errors correctly back to the compiler when parsing fails. //! -//! # Unstable Features +//! [`parse_macro_input!`]: https://docs.rs/syn/1.0/syn/macro.parse_macro_input.html //! -//! `proc-macro2` supports exporting some methods from `proc_macro` which are -//! currently highly unstable, and may not be stabilized in the first pass of -//! `proc_macro` stabilizations. These features are not exported by default. -//! Minor versions of `proc-macro2` may make breaking changes to them at any -//! time. +//! # Unstable features //! -//! To enable these features, the `procmacro2_semver_exempt` config flag must be -//! passed to rustc. +//! The default feature set of proc-macro2 tracks the most recent stable +//! compiler API. Functionality in `proc_macro` that is not yet stable is not +//! exposed by proc-macro2 by default. +//! +//! To opt into the additional APIs available in the most recent nightly +//! compiler, the `procmacro2_semver_exempt` config flag must be passed to +//! rustc. We will polyfill those nightly-only APIs back to Rust 1.31.0. As +//! these are unstable APIs that track the nightly compiler, minor versions of +//! proc-macro2 may make breaking changes to them at any time. //! //! ```sh //! RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build @@ -39,35 +69,40 @@ //! depends on your crate. This infectious nature is intentional, as it serves //! as a reminder that you are outside of the normal semver guarantees. //! -//! [`proc_macro`]: https://doc.rust-lang.org/proc_macro/ -//! [ts]: https://doc.rust-lang.org/proc_macro/struct.TokenStream.html +//! Semver exempt methods are marked as such in the proc-macro2 documentation. +//! +//! # Thread-Safety +//! +//! Most types in this crate are `!Sync` because the underlying compiler +//! types make use of thread-local memory, meaning they cannot be accessed from +//! a different thread. // Proc-macro2 types in rustdoc of other crates get linked to here. 
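// (rustdoc uses html_root_url below as the base URL when resolving those
// cross-crate links, which is why it carries the crate version and is
// updated on each release.)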
-#![doc(html_root_url = "https://docs.rs/proc-macro2/0.4.19")] -#![cfg_attr( - super_unstable, - feature(proc_macro_raw_ident, proc_macro_span) -)] +#![doc(html_root_url = "https://docs.rs/proc-macro2/1.0.6")] +#![cfg_attr(any(proc_macro_span, super_unstable), feature(proc_macro_span))] +#![cfg_attr(super_unstable, feature(proc_macro_raw_ident, proc_macro_def_site))] #[cfg(use_proc_macro)] extern crate proc_macro; -extern crate unicode_xid; use std::cmp::Ordering; use std::fmt; use std::hash::{Hash, Hasher}; use std::iter::FromIterator; use std::marker; +use std::ops::RangeBounds; +#[cfg(procmacro2_semver_exempt)] +use std::path::PathBuf; use std::rc::Rc; use std::str::FromStr; #[macro_use] mod strnom; -mod stable; +mod fallback; #[cfg(not(wrap_proc_macro))] -use stable as imp; -#[path = "unstable.rs"] +use crate::fallback as imp; +#[path = "wrapper.rs"] #[cfg(wrap_proc_macro)] mod imp; @@ -93,12 +128,12 @@ pub struct LexError { impl TokenStream { fn _new(inner: imp::TokenStream) -> TokenStream { TokenStream { - inner: inner, + inner, _marker: marker::PhantomData, } } - fn _new_stable(inner: stable::TokenStream) -> TokenStream { + fn _new_stable(inner: fallback::TokenStream) -> TokenStream { TokenStream { inner: inner.into(), _marker: marker::PhantomData, @@ -110,11 +145,6 @@ impl TokenStream { TokenStream::_new(imp::TokenStream::new()) } - #[deprecated(since = "0.4.4", note = "please use TokenStream::new")] - pub fn empty() -> TokenStream { - TokenStream::new() - } - /// Checks if this `TokenStream` is empty. pub fn is_empty(&self) -> bool { self.inner.is_empty() @@ -163,6 +193,12 @@ impl From for proc_macro::T } } +impl From for TokenStream { + fn from(token: TokenTree) -> Self { + TokenStream::_new(imp::TokenStream::from(token)) + } +} + impl Extend for TokenStream { fn extend>(&mut self, streams: I) { self.inner.extend(streams) @@ -211,19 +247,25 @@ impl fmt::Debug for LexError { } } -// Returned by reference, so we can't easily wrap it. -#[cfg(procmacro2_semver_exempt)] -pub use imp::FileName; - /// The source file of a given `Span`. /// /// This type is semver exempt and not exposed by default. #[cfg(procmacro2_semver_exempt)] #[derive(Clone, PartialEq, Eq)] -pub struct SourceFile(imp::SourceFile); +pub struct SourceFile { + inner: imp::SourceFile, + _marker: marker::PhantomData>, +} #[cfg(procmacro2_semver_exempt)] impl SourceFile { + fn _new(inner: imp::SourceFile) -> Self { + SourceFile { + inner, + _marker: marker::PhantomData, + } + } + /// Get the path to this source file. /// /// ### Note @@ -237,35 +279,29 @@ impl SourceFile { /// may not actually be valid. /// /// [`is_real`]: #method.is_real - pub fn path(&self) -> &FileName { - self.0.path() + pub fn path(&self) -> PathBuf { + self.inner.path() } /// Returns `true` if this source file is a real source file, and not /// generated by an external macro's expansion. pub fn is_real(&self) -> bool { - self.0.is_real() - } -} - -#[cfg(procmacro2_semver_exempt)] -impl AsRef for SourceFile { - fn as_ref(&self) -> &FileName { - self.0.path() + self.inner.is_real() } } #[cfg(procmacro2_semver_exempt)] impl fmt::Debug for SourceFile { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.0.fmt(f) + self.inner.fmt(f) } } /// A line-column pair representing the start or end of a `Span`. /// /// This type is semver exempt and not exposed by default. 
-#[cfg(procmacro2_semver_exempt)] +#[cfg(span_locations)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct LineColumn { /// The 1-indexed line in the source file on which the span starts or ends /// (inclusive). @@ -285,12 +321,12 @@ pub struct Span { impl Span { fn _new(inner: imp::Span) -> Span { Span { - inner: inner, + inner, _marker: marker::PhantomData, } } - fn _new_stable(inner: stable::Span) -> Span { + fn _new_stable(inner: fallback::Span) -> Span { Span { inner: inner.into(), _marker: marker::PhantomData, @@ -332,11 +368,26 @@ impl Span { Span::_new(self.inner.located_at(other.inner)) } - /// This method is only available when the `"nightly"` feature is enabled. + /// Convert `proc_macro2::Span` to `proc_macro::Span`. + /// + /// This method is available when building with a nightly compiler, or when + /// building with rustc 1.29+ *without* semver exempt features. + /// + /// # Panics + /// + /// Panics if called from outside of a procedural macro. Unlike + /// `proc_macro2::Span`, the `proc_macro::Span` type can only exist within + /// the context of a procedural macro invocation. + #[cfg(wrap_proc_macro)] + pub fn unwrap(self) -> proc_macro::Span { + self.inner.unwrap() + } + + // Soft deprecated. Please use Span::unwrap. + #[cfg(wrap_proc_macro)] #[doc(hidden)] - #[cfg(any(feature = "nightly", super_unstable))] pub fn unstable(self) -> proc_macro::Span { - self.inner.unstable() + self.unwrap() } /// The original source file into which this span points. @@ -344,44 +395,41 @@ impl Span { /// This method is semver exempt and not exposed by default. #[cfg(procmacro2_semver_exempt)] pub fn source_file(&self) -> SourceFile { - SourceFile(self.inner.source_file()) + SourceFile::_new(self.inner.source_file()) } /// Get the starting line/column in the source file for this span. /// - /// This method is semver exempt and not exposed by default. - #[cfg(procmacro2_semver_exempt)] + /// This method requires the `"span-locations"` feature to be enabled. + #[cfg(span_locations)] pub fn start(&self) -> LineColumn { let imp::LineColumn { line, column } = self.inner.start(); - LineColumn { - line: line, - column: column, - } + LineColumn { line, column } } /// Get the ending line/column in the source file for this span. /// - /// This method is semver exempt and not exposed by default. - #[cfg(procmacro2_semver_exempt)] + /// This method requires the `"span-locations"` feature to be enabled. + #[cfg(span_locations)] pub fn end(&self) -> LineColumn { let imp::LineColumn { line, column } = self.inner.end(); - LineColumn { - line: line, - column: column, - } + LineColumn { line, column } } /// Create a new span encompassing `self` and `other`. /// /// Returns `None` if `self` and `other` are from different files. /// - /// This method is semver exempt and not exposed by default. - #[cfg(procmacro2_semver_exempt)] + /// Warning: the underlying [`proc_macro::Span::join`] method is + /// nightly-only. When called from within a procedural macro not using a + /// nightly compiler, this method will always return `None`. + /// + /// [`proc_macro::Span::join`]: https://doc.rust-lang.org/proc_macro/struct.Span.html#method.join pub fn join(&self, other: Span) -> Option { self.inner.join(other.inner).map(Span::_new) } - /// Compares to spans to see if they're equal. + /// Compares two spans to see if they're equal. /// /// This method is semver exempt and not exposed by default. 
#[cfg(procmacro2_semver_exempt)] @@ -486,8 +534,7 @@ impl fmt::Debug for TokenTree { TokenTree::Ident(ref t) => { let mut debug = f.debug_struct("Ident"); debug.field("sym", &format_args!("{}", t)); - #[cfg(any(feature = "nightly", procmacro2_semver_exempt))] - debug.field("span", &t.span()); + imp::debug_span_field_if_nontrivial(&mut debug, t.span().inner); debug.finish() } TokenTree::Punct(ref t) => t.fmt(f), @@ -502,9 +549,7 @@ impl fmt::Debug for TokenTree { /// `Delimiter`s. #[derive(Clone)] pub struct Group { - delimiter: Delimiter, - stream: TokenStream, - span: Span, + inner: imp::Group, } /// Describes how a sequence of token trees is delimited. @@ -527,6 +572,16 @@ pub enum Delimiter { } impl Group { + fn _new(inner: imp::Group) -> Self { + Group { inner } + } + + fn _new_stable(inner: fallback::Group) -> Self { + Group { + inner: inner.into(), + } + } + /// Creates a new `Group` with the given delimiter and token stream. /// /// This constructor will set the span for this group to @@ -534,15 +589,13 @@ impl Group { /// method below. pub fn new(delimiter: Delimiter, stream: TokenStream) -> Group { Group { - delimiter: delimiter, - stream: stream, - span: Span::call_site(), + inner: imp::Group::new(delimiter, stream.inner), } } /// Returns the delimiter of this `Group` pub fn delimiter(&self) -> Delimiter { - self.delimiter + self.inner.delimiter() } /// Returns the `TokenStream` of tokens that are delimited in this `Group`. @@ -550,13 +603,38 @@ impl Group { /// Note that the returned token stream does not include the delimiter /// returned above. pub fn stream(&self) -> TokenStream { - self.stream.clone() + TokenStream::_new(self.inner.stream()) } /// Returns the span for the delimiters of this token stream, spanning the /// entire `Group`. + /// + /// ```text + /// pub fn span(&self) -> Span { + /// ^^^^^^^ + /// ``` pub fn span(&self) -> Span { - self.span + Span::_new(self.inner.span()) + } + + /// Returns the span pointing to the opening delimiter of this group. + /// + /// ```text + /// pub fn span_open(&self) -> Span { + /// ^ + /// ``` + pub fn span_open(&self) -> Span { + Span::_new(self.inner.span_open()) + } + + /// Returns the span pointing to the closing delimiter of this group. + /// + /// ```text + /// pub fn span_close(&self) -> Span { + /// ^ + /// ``` + pub fn span_close(&self) -> Span { + Span::_new(self.inner.span_close()) } /// Configures the span for this `Group`'s delimiters, but not its internal @@ -566,7 +644,7 @@ impl Group { /// by this group, but rather it will only set the span of the delimiter /// tokens at the level of the `Group`. pub fn set_span(&mut self, span: Span) { - self.span = span; + self.inner.set_span(span.inner) } } @@ -574,30 +652,14 @@ impl Group { /// into the same group (modulo spans), except for possibly `TokenTree::Group`s /// with `Delimiter::None` delimiters. 
impl fmt::Display for Group { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let (left, right) = match self.delimiter { - Delimiter::Parenthesis => ("(", ")"), - Delimiter::Brace => ("{", "}"), - Delimiter::Bracket => ("[", "]"), - Delimiter::None => ("", ""), - }; - - f.write_str(left)?; - self.stream.fmt(f)?; - f.write_str(right)?; - - Ok(()) + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.inner, formatter) } } impl fmt::Debug for Group { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let mut debug = fmt.debug_struct("Group"); - debug.field("delimiter", &self.delimiter); - debug.field("stream", &self.stream); - #[cfg(procmacro2_semver_exempt)] - debug.field("span", &self.span); - debug.finish() + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.inner, formatter) } } @@ -618,7 +680,7 @@ pub struct Punct { pub enum Spacing { /// E.g. `+` is `Alone` in `+ =`, `+ident` or `+()`. Alone, - /// E.g. `+` is `Joint` in `+=` or `'#`. + /// E.g. `+` is `Joint` in `+=` or `'` is `Joint` in `'#`. /// /// Additionally, single quote `'` can join with identifiers to form /// lifetimes `'ident`. @@ -635,8 +697,8 @@ impl Punct { /// which can be further configured with the `set_span` method below. pub fn new(op: char, spacing: Spacing) -> Punct { Punct { - op: op, - spacing: spacing, + op, + spacing, span: Span::call_site(), } } @@ -679,8 +741,7 @@ impl fmt::Debug for Punct { let mut debug = fmt.debug_struct("Punct"); debug.field("op", &self.op); debug.field("spacing", &self.spacing); - #[cfg(procmacro2_semver_exempt)] - debug.field("span", &self.span); + imp::debug_span_field_if_nontrivial(&mut debug, self.span.inner); debug.finish() } } @@ -695,11 +756,11 @@ impl fmt::Debug for Punct { /// - A lifetime is not an identifier. Use `syn::Lifetime` instead. /// /// An identifier constructed with `Ident::new` is permitted to be a Rust -/// keyword, though parsing one through its [`Synom`] implementation rejects -/// Rust keywords. Use `call!(Ident::parse_any)` when parsing to match the +/// keyword, though parsing one through its [`Parse`] implementation rejects +/// Rust keywords. Use `input.call(Ident::parse_any)` when parsing to match the /// behaviour of `Ident::new`. /// -/// [`Synom`]: https://docs.rs/syn/0.14/syn/synom/trait.Synom.html +/// [`Parse`]: https://docs.rs/syn/1.0/syn/parse/trait.Parse.html /// /// # Examples /// @@ -707,9 +768,7 @@ impl fmt::Debug for Punct { /// A span must be provided explicitly which governs the name resolution /// behavior of the resulting identifier. /// -/// ```rust -/// extern crate proc_macro2; -/// +/// ``` /// use proc_macro2::{Ident, Span}; /// /// fn main() { @@ -721,13 +780,9 @@ impl fmt::Debug for Punct { /// /// An ident can be interpolated into a token stream using the `quote!` macro. /// -/// ```rust -/// #[macro_use] -/// extern crate quote; -/// -/// extern crate proc_macro2; -/// +/// ``` /// use proc_macro2::{Ident, Span}; +/// use quote::quote; /// /// fn main() { /// let ident = Ident::new("demo", Span::call_site()); @@ -744,9 +799,7 @@ impl fmt::Debug for Punct { /// A string representation of the ident is available through the `to_string()` /// method. 
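The corrected `Spacing` documentation above is easiest to see by lexing a compound operator: in `+=` the `+` is `Joint` because another operator character follows it immediately, while the `=` is `Alone`. A minimal sketch:

    use proc_macro2::{TokenStream, TokenTree};

    fn main() {
        let tokens: TokenStream = "a += b".parse().unwrap();
        for tt in tokens {
            if let TokenTree::Punct(p) = tt {
                // Prints `+ Joint` followed by `= Alone`.
                println!("{} {:?}", p.as_char(), p.spacing());
            }
        }
    }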
/// -/// ```rust -/// # extern crate proc_macro2; -/// # +/// ``` /// # use proc_macro2::{Ident, Span}; /// # /// # let ident = Ident::new("another_identifier", Span::call_site()); @@ -766,7 +819,7 @@ pub struct Ident { impl Ident { fn _new(inner: imp::Ident) -> Ident { Ident { - inner: inner, + inner, _marker: marker::PhantomData, } } @@ -796,7 +849,12 @@ impl Ident { /// # Panics /// /// Panics if the input string is neither a keyword nor a legal variable - /// name. + /// name. If you are not sure whether the string contains an identifier and + /// need to handle an error case, use + /// syn::parse_str::<Ident> + /// rather than `Ident::new`. pub fn new(string: &str, span: Span) -> Ident { Ident::_new(imp::Ident::new(string, span.inner)) } @@ -929,12 +987,12 @@ macro_rules! unsuffixed_int_literals { impl Literal { fn _new(inner: imp::Literal) -> Literal { Literal { - inner: inner, + inner, _marker: marker::PhantomData, } } - fn _new_stable(inner: stable::Literal) -> Literal { + fn _new_stable(inner: fallback::Literal) -> Literal { Literal { inner: inner.into(), _marker: marker::PhantomData, @@ -946,11 +1004,13 @@ impl Literal { u16_suffixed => u16, u32_suffixed => u32, u64_suffixed => u64, + u128_suffixed => u128, usize_suffixed => usize, i8_suffixed => i8, i16_suffixed => i16, i32_suffixed => i32, i64_suffixed => i64, + i128_suffixed => i128, isize_suffixed => isize, } @@ -959,19 +1019,47 @@ impl Literal { u16_unsuffixed => u16, u32_unsuffixed => u32, u64_unsuffixed => u64, + u128_unsuffixed => u128, usize_unsuffixed => usize, i8_unsuffixed => i8, i16_unsuffixed => i16, i32_unsuffixed => i32, i64_unsuffixed => i64, + i128_unsuffixed => i128, isize_unsuffixed => isize, } + /// Creates a new unsuffixed floating-point literal. + /// + /// This constructor is similar to those like `Literal::i8_unsuffixed` where + /// the float's value is emitted directly into the token but no suffix is + /// used, so it may be inferred to be an `f64` later in the compiler. + /// Literals created from negative numbers may not survive roundtrips through + /// `TokenStream` or strings and may be broken into two tokens (`-` and + /// positive literal). + /// + /// # Panics + /// + /// This function requires that the specified float is finite, for example + /// if it is infinity or NaN this function will panic. pub fn f64_unsuffixed(f: f64) -> Literal { assert!(f.is_finite()); Literal::_new(imp::Literal::f64_unsuffixed(f)) } + /// Creates a new suffixed floating-point literal. + /// + /// This constructor will create a literal like `1.0f64` where the value + /// specified is the preceding part of the token and `f64` is the suffix of + /// the token. This token will always be inferred to be an `f64` in the + /// compiler. Literals created from negative numbers may not survive + /// roundtrips through `TokenStream` or strings and may be broken into two + /// tokens (`-` and positive literal). + /// + /// # Panics + /// + /// This function requires that the specified float is finite, for example + /// if it is infinity or NaN this function will panic. pub fn f64_suffixed(f: f64) -> Literal { assert!(f.is_finite()); Literal::_new(imp::Literal::f64_suffixed(f)) @@ -995,30 +1083,61 @@ impl Literal { Literal::_new(imp::Literal::f32_unsuffixed(f)) } + /// Creates a new suffixed floating-point literal. + /// + /// This constructor will create a literal like `1.0f32` where the value + /// specified is the preceding part of the token and `f32` is the suffix of + /// the token.
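The newly added `u128`/`i128` constructors behave like the other integer widths: suffixed constructors bake the type into the token text, unsuffixed ones leave inference to the compiler. A small sketch of the rendered tokens:

    use proc_macro2::Literal;

    fn main() {
        // Suffixed: the token text carries the type.
        assert_eq!(Literal::i128_suffixed(10).to_string(), "10i128");
        // Unsuffixed: digits only; the compiler infers the type at the use site.
        assert_eq!(Literal::u128_unsuffixed(10).to_string(), "10");
        // Unsuffixed floats keep a ".0" so the token still lexes as a float.
        assert_eq!(Literal::f64_unsuffixed(10.0).to_string(), "10.0");
    }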
This token will always be inferred to be an `f32` in the + /// compiler. Literals created from negative numbers may not survive + /// roundtrips through `TokenStream` or strings and may be broken into two + /// tokens (`-` and positive literal). + /// + /// # Panics + /// + /// This function requires that the specified float is finite, for example + /// if it is infinity or NaN this function will panic. pub fn f32_suffixed(f: f32) -> Literal { assert!(f.is_finite()); Literal::_new(imp::Literal::f32_suffixed(f)) } + /// String literal. pub fn string(string: &str) -> Literal { Literal::_new(imp::Literal::string(string)) } + /// Character literal. pub fn character(ch: char) -> Literal { Literal::_new(imp::Literal::character(ch)) } + /// Byte string literal. pub fn byte_string(s: &[u8]) -> Literal { Literal::_new(imp::Literal::byte_string(s)) } + /// Returns the span encompassing this literal. pub fn span(&self) -> Span { Span::_new(self.inner.span()) } + /// Configures the span associated with this literal. pub fn set_span(&mut self, span: Span) { self.inner.set_span(span.inner); } + + /// Returns a `Span` that is a subset of `self.span()` containing only + /// the source bytes in range `range`. Returns `None` if the would-be + /// trimmed span is outside the bounds of `self`. + /// + /// Warning: the underlying [`proc_macro::Literal::subspan`] method is + /// nightly-only. When called from within a procedural macro not using a + /// nightly compiler, this method will always return `None`. + /// + /// [`proc_macro::Literal::subspan`]: https://doc.rust-lang.org/proc_macro/struct.Literal.html#method.subspan + pub fn subspan<R: RangeBounds<usize>>(&self, range: R) -> Option<Span> { + self.inner.subspan(range).map(Span::_new) + } } impl fmt::Debug for Literal { @@ -1039,14 +1158,14 @@ pub mod token_stream { use std::marker; use std::rc::Rc; - use imp; - pub use TokenStream; - use TokenTree; + pub use crate::TokenStream; + use crate::{imp, TokenTree}; /// An iterator over `TokenStream`'s `TokenTree`s. /// /// The iteration is "shallow", e.g. the iterator doesn't recurse into /// delimited groups, and returns whole groups as token trees.
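Because `subspan` only succeeds when the underlying nightly `proc_macro` API is available, callers generally want a fallback path. A hedged sketch of that pattern (the helper name is illustrative, not part of the crate):

    use proc_macro2::{Literal, Span};

    // Span of the literal's first byte when the compiler supports subspan,
    // otherwise the span of the whole literal.
    fn first_byte_span(lit: &Literal) -> Span {
        lit.subspan(0..1).unwrap_or_else(|| lit.span())
    }

    fn main() {
        let lit = Literal::string("demo");
        let _span = first_byte_span(&lit);
    }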
+ #[derive(Clone)] pub struct IntoIter { inner: imp::TokenTreeIter, _marker: marker::PhantomData<Rc<()>>, --- a/vendor/proc-macro2/src/stable.rs +++ /dev/null @@ -1,1325 +0,0 @@ -#![cfg_attr(not(procmacro2_semver_exempt), allow(dead_code))] - -#[cfg(procmacro2_semver_exempt)] -use std::cell::RefCell; -#[cfg(procmacro2_semver_exempt)] -use std::cmp; -use std::fmt; -use std::iter; -use std::str::FromStr; -use std::vec; - -use strnom::{block_comment, skip_whitespace, whitespace, word_break, Cursor, PResult}; -use unicode_xid::UnicodeXID; - -use {Delimiter, Group, Punct, Spacing, TokenTree}; - -#[derive(Clone)] -pub struct TokenStream { - inner: Vec<TokenTree>, -} - -#[derive(Debug)] -pub struct LexError; - -impl TokenStream { - pub fn new() -> TokenStream { - TokenStream { inner: Vec::new() } - } - - pub fn is_empty(&self) -> bool { - self.inner.len() == 0 - } -} - -#[cfg(procmacro2_semver_exempt)] -fn get_cursor(src: &str) -> Cursor { - // Create a dummy file & add it to the codemap - CODEMAP.with(|cm| { - let mut cm = cm.borrow_mut(); - let name = format!("<parsed string {}>", cm.files.len()); - let span = cm.add_file(&name, src); - Cursor { - rest: src, - off: span.lo, - } - }) -} - -#[cfg(not(procmacro2_semver_exempt))] -fn get_cursor(src: &str) -> Cursor { - Cursor { rest: src } -} - -impl FromStr for TokenStream { - type Err = LexError; - - fn from_str(src: &str) -> Result<TokenStream, LexError> { - // Create a dummy file & add it to the codemap - let cursor = get_cursor(src); - - match token_stream(cursor) { - Ok((input, output)) => { - if skip_whitespace(input).len() != 0 { - Err(LexError) - } else { - Ok(output) - } - } - Err(LexError) => Err(LexError), - } - } -} - -impl fmt::Display for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut joint = false; - for (i, tt) in self.inner.iter().enumerate() { - if i != 0 && !joint { - write!(f, " ")?; - } - joint = false; - match *tt { - TokenTree::Group(ref tt) => { - let (start, end) = match tt.delimiter() { - Delimiter::Parenthesis => ("(", ")"), - Delimiter::Brace => ("{", "}"), - Delimiter::Bracket => ("[", "]"), - Delimiter::None => ("", ""), - }; - if tt.stream().into_iter().next().is_none() { - write!(f, "{} {}", start, end)? - } else { - write!(f, "{} {} {}", start, tt.stream(), end)?
- } - } - TokenTree::Ident(ref tt) => write!(f, "{}", tt)?, - TokenTree::Punct(ref tt) => { - write!(f, "{}", tt.as_char())?; - match tt.spacing() { - Spacing::Alone => {} - Spacing::Joint => joint = true, - } - } - TokenTree::Literal(ref tt) => write!(f, "{}", tt)?, - } - } - - Ok(()) - } -} - -impl fmt::Debug for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("TokenStream ")?; - f.debug_list().entries(self.clone()).finish() - } -} - -#[cfg(use_proc_macro)] -impl From<::proc_macro::TokenStream> for TokenStream { - fn from(inner: ::proc_macro::TokenStream) -> TokenStream { - inner - .to_string() - .parse() - .expect("compiler token stream parse failed") - } -} - -#[cfg(use_proc_macro)] -impl From for ::proc_macro::TokenStream { - fn from(inner: TokenStream) -> ::proc_macro::TokenStream { - inner - .to_string() - .parse() - .expect("failed to parse to compiler tokens") - } -} - -impl From for TokenStream { - fn from(tree: TokenTree) -> TokenStream { - TokenStream { inner: vec![tree] } - } -} - -impl iter::FromIterator for TokenStream { - fn from_iter>(streams: I) -> Self { - let mut v = Vec::new(); - - for token in streams.into_iter() { - v.push(token); - } - - TokenStream { inner: v } - } -} - -impl iter::FromIterator for TokenStream { - fn from_iter>(streams: I) -> Self { - let mut v = Vec::new(); - - for stream in streams.into_iter() { - v.extend(stream.inner); - } - - TokenStream { inner: v } - } -} - -impl Extend for TokenStream { - fn extend>(&mut self, streams: I) { - self.inner.extend(streams); - } -} - -impl Extend for TokenStream { - fn extend>(&mut self, streams: I) { - self.inner - .extend(streams.into_iter().flat_map(|stream| stream)); - } -} - -pub type TokenTreeIter = vec::IntoIter; - -impl IntoIterator for TokenStream { - type Item = TokenTree; - type IntoIter = TokenTreeIter; - - fn into_iter(self) -> TokenTreeIter { - self.inner.into_iter() - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct FileName(String); - -#[allow(dead_code)] -pub fn file_name(s: String) -> FileName { - FileName(s) -} - -impl fmt::Display for FileName { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.0.fmt(f) - } -} - -#[derive(Clone, PartialEq, Eq)] -pub struct SourceFile { - name: FileName, -} - -impl SourceFile { - /// Get the path to this source file as a string. - pub fn path(&self) -> &FileName { - &self.name - } - - pub fn is_real(&self) -> bool { - // XXX(nika): Support real files in the future? - false - } -} - -impl AsRef for SourceFile { - fn as_ref(&self) -> &FileName { - self.path() - } -} - -impl fmt::Debug for SourceFile { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("SourceFile") - .field("path", &self.path()) - .field("is_real", &self.is_real()) - .finish() - } -} - -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct LineColumn { - pub line: usize, - pub column: usize, -} - -#[cfg(procmacro2_semver_exempt)] -thread_local! { - static CODEMAP: RefCell = RefCell::new(Codemap { - // NOTE: We start with a single dummy file which all call_site() and - // def_site() spans reference. 
- files: vec![FileInfo { - name: "<unspecified>".to_owned(), - span: Span { lo: 0, hi: 0 }, - lines: vec![0], - }], - }); -} - -#[cfg(procmacro2_semver_exempt)] -struct FileInfo { - name: String, - span: Span, - lines: Vec<usize>, -} - -#[cfg(procmacro2_semver_exempt)] -impl FileInfo { - fn offset_line_column(&self, offset: usize) -> LineColumn { - assert!(self.span_within(Span { - lo: offset as u32, - hi: offset as u32 - })); - let offset = offset - self.span.lo as usize; - match self.lines.binary_search(&offset) { - Ok(found) => LineColumn { - line: found + 1, - column: 0, - }, - Err(idx) => LineColumn { - line: idx, - column: offset - self.lines[idx - 1], - }, - } - } - - fn span_within(&self, span: Span) -> bool { - span.lo >= self.span.lo && span.hi <= self.span.hi - } -} - -/// Computes the offsets of each line in the given source string. -#[cfg(procmacro2_semver_exempt)] -fn lines_offsets(s: &str) -> Vec<usize> { - let mut lines = vec![0]; - let mut prev = 0; - while let Some(len) = s[prev..].find('\n') { - prev += len + 1; - lines.push(prev); - } - lines -} - -#[cfg(procmacro2_semver_exempt)] -struct Codemap { - files: Vec<FileInfo>, -} - -#[cfg(procmacro2_semver_exempt)] -impl Codemap { - fn next_start_pos(&self) -> u32 { - // Add 1 so there's always space between files. - // - // We'll always have at least 1 file, as we initialize our files list - // with a dummy file. - self.files.last().unwrap().span.hi + 1 - } - - fn add_file(&mut self, name: &str, src: &str) -> Span { - let lines = lines_offsets(src); - let lo = self.next_start_pos(); - // XXX(nika): Should we bother doing a checked cast or checked add here? - let span = Span { - lo: lo, - hi: lo + (src.len() as u32), - }; - - self.files.push(FileInfo { - name: name.to_owned(), - span: span, - lines: lines, - }); - - span - } - - fn fileinfo(&self, span: Span) -> &FileInfo { - for file in &self.files { - if file.span_within(span) { - return file; - } - } - panic!("Invalid span with no related FileInfo!"); - } -} - -#[derive(Clone, Copy, PartialEq, Eq)] -pub struct Span { - #[cfg(procmacro2_semver_exempt)] - lo: u32, - #[cfg(procmacro2_semver_exempt)] - hi: u32, -} - -impl Span { - #[cfg(not(procmacro2_semver_exempt))] - pub fn call_site() -> Span { - Span {} - } - - #[cfg(procmacro2_semver_exempt)] - pub fn call_site() -> Span { - Span { lo: 0, hi: 0 } - } - - pub fn def_site() -> Span { - Span::call_site() - } - - pub fn resolved_at(&self, _other: Span) -> Span { - // Stable spans consist only of line/column information, so - // `resolved_at` and `located_at` only select which span the - // caller wants line/column information from. - *self - } - - pub fn located_at(&self, other: Span) -> Span { - other - } - - #[cfg(procmacro2_semver_exempt)] - pub fn source_file(&self) -> SourceFile { - CODEMAP.with(|cm| { - let cm = cm.borrow(); - let fi = cm.fileinfo(*self); - SourceFile { - name: FileName(fi.name.clone()), - } - }) - } - - #[cfg(procmacro2_semver_exempt)] - pub fn start(&self) -> LineColumn { - CODEMAP.with(|cm| { - let cm = cm.borrow(); - let fi = cm.fileinfo(*self); - fi.offset_line_column(self.lo as usize) - }) - } - - #[cfg(procmacro2_semver_exempt)] - pub fn end(&self) -> LineColumn { - CODEMAP.with(|cm| { - let cm = cm.borrow(); - let fi = cm.fileinfo(*self); - fi.offset_line_column(self.hi as usize) - }) - } - - #[cfg(procmacro2_semver_exempt)] - pub fn join(&self, other: Span) -> Option<Span> { - CODEMAP.with(|cm| { - let cm = cm.borrow(); - // If `other` is not within the same FileInfo as us, return None.
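The fallback codemap being deleted here (and re-created in fallback.rs) recovers line/column pairs by recording each line-start byte offset and binary-searching that table, as in `offset_line_column` above. A standalone sketch of the same technique, independent of the crate's internals:

    // Record the byte offset at which each line starts.
    fn lines_offsets(s: &str) -> Vec<usize> {
        let mut lines = vec![0];
        let mut prev = 0;
        while let Some(len) = s[prev..].find('\n') {
            prev += len + 1;
            lines.push(prev);
        }
        lines
    }

    // Binary-search the table to turn a byte offset into (line, column).
    fn offset_line_column(lines: &[usize], offset: usize) -> (usize, usize) {
        match lines.binary_search(&offset) {
            // Exactly at a line start: 1-indexed line, column 0.
            Ok(found) => (found + 1, 0),
            // Otherwise idx is the insertion point; the previous entry is
            // the start of the line containing `offset`.
            Err(idx) => (idx, offset - lines[idx - 1]),
        }
    }

    fn main() {
        let src = "fn main() {\n    let x = 1;\n}\n";
        let lines = lines_offsets(src);
        assert_eq!(offset_line_column(&lines, 0), (1, 0));
        assert_eq!(offset_line_column(&lines, 16), (2, 4)); // points at `let`
    }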
- if !cm.fileinfo(*self).span_within(other) { - return None; - } - Some(Span { - lo: cmp::min(self.lo, other.lo), - hi: cmp::max(self.hi, other.hi), - }) - }) - } -} - -impl fmt::Debug for Span { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - #[cfg(procmacro2_semver_exempt)] - return write!(f, "bytes({}..{})", self.lo, self.hi); - - #[cfg(not(procmacro2_semver_exempt))] - write!(f, "Span") - } -} - -#[derive(Clone)] -pub struct Ident { - sym: String, - span: Span, - raw: bool, -} - -impl Ident { - fn _new(string: &str, raw: bool, span: Span) -> Ident { - validate_term(string); - - Ident { - sym: string.to_owned(), - span: span, - raw: raw, - } - } - - pub fn new(string: &str, span: Span) -> Ident { - Ident::_new(string, false, span) - } - - pub fn new_raw(string: &str, span: Span) -> Ident { - Ident::_new(string, true, span) - } - - pub fn span(&self) -> Span { - self.span - } - - pub fn set_span(&mut self, span: Span) { - self.span = span; - } -} - -#[inline] -fn is_ident_start(c: char) -> bool { - ('a' <= c && c <= 'z') - || ('A' <= c && c <= 'Z') - || c == '_' - || (c > '\x7f' && UnicodeXID::is_xid_start(c)) -} - -#[inline] -fn is_ident_continue(c: char) -> bool { - ('a' <= c && c <= 'z') - || ('A' <= c && c <= 'Z') - || c == '_' - || ('0' <= c && c <= '9') - || (c > '\x7f' && UnicodeXID::is_xid_continue(c)) -} - -fn validate_term(string: &str) { - let validate = string; - if validate.is_empty() { - panic!("Ident is not allowed to be empty; use Option"); - } - - if validate.bytes().all(|digit| digit >= b'0' && digit <= b'9') { - panic!("Ident cannot be a number; use Literal instead"); - } - - fn ident_ok(string: &str) -> bool { - let mut chars = string.chars(); - let first = chars.next().unwrap(); - if !is_ident_start(first) { - return false; - } - for ch in chars { - if !is_ident_continue(ch) { - return false; - } - } - true - } - - if !ident_ok(validate) { - panic!("{:?} is not a valid Ident", string); - } -} - -impl PartialEq for Ident { - fn eq(&self, other: &Ident) -> bool { - self.sym == other.sym && self.raw == other.raw - } -} - -impl PartialEq for Ident -where - T: ?Sized + AsRef, -{ - fn eq(&self, other: &T) -> bool { - let other = other.as_ref(); - if self.raw { - other.starts_with("r#") && self.sym == other[2..] - } else { - self.sym == other - } - } -} - -impl fmt::Display for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if self.raw { - "r#".fmt(f)?; - } - self.sym.fmt(f) - } -} - -impl fmt::Debug for Ident { - // Ident(proc_macro), Ident(r#union) - #[cfg(not(procmacro2_semver_exempt))] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut debug = f.debug_tuple("Ident"); - debug.field(&format_args!("{}", self)); - debug.finish() - } - - // Ident { - // sym: proc_macro, - // span: bytes(128..138) - // } - #[cfg(procmacro2_semver_exempt)] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut debug = f.debug_struct("Ident"); - debug.field("sym", &format_args!("{}", self)); - debug.field("span", &self.span); - debug.finish() - } -} - -#[derive(Clone)] -pub struct Literal { - text: String, - span: Span, -} - -macro_rules! suffixed_numbers { - ($($name:ident => $kind:ident,)*) => ($( - pub fn $name(n: $kind) -> Literal { - Literal::_new(format!(concat!("{}", stringify!($kind)), n)) - } - )*) -} - -macro_rules! 
unsuffixed_numbers { - ($($name:ident => $kind:ident,)*) => ($( - pub fn $name(n: $kind) -> Literal { - Literal::_new(n.to_string()) - } - )*) -} - -impl Literal { - fn _new(text: String) -> Literal { - Literal { - text: text, - span: Span::call_site(), - } - } - - suffixed_numbers! { - u8_suffixed => u8, - u16_suffixed => u16, - u32_suffixed => u32, - u64_suffixed => u64, - usize_suffixed => usize, - i8_suffixed => i8, - i16_suffixed => i16, - i32_suffixed => i32, - i64_suffixed => i64, - isize_suffixed => isize, - - f32_suffixed => f32, - f64_suffixed => f64, - } - - unsuffixed_numbers! { - u8_unsuffixed => u8, - u16_unsuffixed => u16, - u32_unsuffixed => u32, - u64_unsuffixed => u64, - usize_unsuffixed => usize, - i8_unsuffixed => i8, - i16_unsuffixed => i16, - i32_unsuffixed => i32, - i64_unsuffixed => i64, - isize_unsuffixed => isize, - } - - pub fn f32_unsuffixed(f: f32) -> Literal { - let mut s = f.to_string(); - if !s.contains(".") { - s.push_str(".0"); - } - Literal::_new(s) - } - - pub fn f64_unsuffixed(f: f64) -> Literal { - let mut s = f.to_string(); - if !s.contains(".") { - s.push_str(".0"); - } - Literal::_new(s) - } - - pub fn string(t: &str) -> Literal { - let mut s = t - .chars() - .flat_map(|c| c.escape_default()) - .collect::(); - s.push('"'); - s.insert(0, '"'); - Literal::_new(s) - } - - pub fn character(t: char) -> Literal { - Literal::_new(format!("'{}'", t.escape_default().collect::())) - } - - pub fn byte_string(bytes: &[u8]) -> Literal { - let mut escaped = "b\"".to_string(); - for b in bytes { - match *b { - b'\0' => escaped.push_str(r"\0"), - b'\t' => escaped.push_str(r"\t"), - b'\n' => escaped.push_str(r"\n"), - b'\r' => escaped.push_str(r"\r"), - b'"' => escaped.push_str("\\\""), - b'\\' => escaped.push_str("\\\\"), - b'\x20'...b'\x7E' => escaped.push(*b as char), - _ => escaped.push_str(&format!("\\x{:02X}", b)), - } - } - escaped.push('"'); - Literal::_new(escaped) - } - - pub fn span(&self) -> Span { - self.span - } - - pub fn set_span(&mut self, span: Span) { - self.span = span; - } -} - -impl fmt::Display for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.text.fmt(f) - } -} - -impl fmt::Debug for Literal { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let mut debug = fmt.debug_struct("Literal"); - debug.field("lit", &format_args!("{}", self.text)); - #[cfg(procmacro2_semver_exempt)] - debug.field("span", &self.span); - debug.finish() - } -} - -fn token_stream(mut input: Cursor) -> PResult { - let mut trees = Vec::new(); - loop { - let input_no_ws = skip_whitespace(input); - if input_no_ws.rest.len() == 0 { - break; - } - if let Ok((a, tokens)) = doc_comment(input_no_ws) { - input = a; - trees.extend(tokens); - continue; - } - - let (a, tt) = match token_tree(input_no_ws) { - Ok(p) => p, - Err(_) => break, - }; - trees.push(tt); - input = a; - } - Ok((input, TokenStream { inner: trees })) -} - -#[cfg(not(procmacro2_semver_exempt))] -fn spanned<'a, T>( - input: Cursor<'a>, - f: fn(Cursor<'a>) -> PResult<'a, T>, -) -> PResult<'a, (T, ::Span)> { - let (a, b) = f(skip_whitespace(input))?; - Ok((a, ((b, ::Span::_new_stable(Span {}))))) -} - -#[cfg(procmacro2_semver_exempt)] -fn spanned<'a, T>( - input: Cursor<'a>, - f: fn(Cursor<'a>) -> PResult<'a, T>, -) -> PResult<'a, (T, ::Span)> { - let input = skip_whitespace(input); - let lo = input.off; - let (a, b) = f(input)?; - let hi = a.off; - let span = ::Span::_new_stable(Span { lo: lo, hi: hi }); - Ok((a, (b, span))) -} - -fn token_tree(input: Cursor) -> PResult { - let 
(rest, (mut tt, span)) = spanned(input, token_kind)?; - tt.set_span(span); - Ok((rest, tt)) -} - -named!(token_kind -> TokenTree, alt!( - map!(group, TokenTree::Group) - | - map!(literal, |l| TokenTree::Literal(::Literal::_new_stable(l))) // must be before symbol - | - map!(op, TokenTree::Punct) - | - symbol_leading_ws -)); - -named!(group -> Group, alt!( - delimited!( - punct!("("), - token_stream, - punct!(")") - ) => { |ts| Group::new(Delimiter::Parenthesis, ::TokenStream::_new_stable(ts)) } - | - delimited!( - punct!("["), - token_stream, - punct!("]") - ) => { |ts| Group::new(Delimiter::Bracket, ::TokenStream::_new_stable(ts)) } - | - delimited!( - punct!("{"), - token_stream, - punct!("}") - ) => { |ts| Group::new(Delimiter::Brace, ::TokenStream::_new_stable(ts)) } -)); - -fn symbol_leading_ws(input: Cursor) -> PResult { - symbol(skip_whitespace(input)) -} - -fn symbol(input: Cursor) -> PResult { - let mut chars = input.char_indices(); - - let raw = input.starts_with("r#"); - if raw { - chars.next(); - chars.next(); - } - - match chars.next() { - Some((_, ch)) if is_ident_start(ch) => {} - _ => return Err(LexError), - } - - let mut end = input.len(); - for (i, ch) in chars { - if !is_ident_continue(ch) { - end = i; - break; - } - } - - let a = &input.rest[..end]; - if a == "r#_" { - Err(LexError) - } else { - let ident = if raw { - ::Ident::_new_raw(&a[2..], ::Span::call_site()) - } else { - ::Ident::new(a, ::Span::call_site()) - }; - Ok((input.advance(end), ident.into())) - } -} - -fn literal(input: Cursor) -> PResult { - let input_no_ws = skip_whitespace(input); - - match literal_nocapture(input_no_ws) { - Ok((a, ())) => { - let start = input.len() - input_no_ws.len(); - let len = input_no_ws.len() - a.len(); - let end = start + len; - Ok((a, Literal::_new(input.rest[start..end].to_string()))) - } - Err(LexError) => Err(LexError), - } -} - -named!(literal_nocapture -> (), alt!( - string - | - byte_string - | - byte - | - character - | - float - | - int -)); - -named!(string -> (), alt!( - quoted_string - | - preceded!( - punct!("r"), - raw_string - ) => { |_| () } -)); - -named!(quoted_string -> (), delimited!( - punct!("\""), - cooked_string, - tag!("\"") -)); - -fn cooked_string(input: Cursor) -> PResult<()> { - let mut chars = input.char_indices().peekable(); - while let Some((byte_offset, ch)) = chars.next() { - match ch { - '"' => { - return Ok((input.advance(byte_offset), ())); - } - '\r' => { - if let Some((_, '\n')) = chars.next() { - // ... - } else { - break; - } - } - '\\' => match chars.next() { - Some((_, 'x')) => { - if !backslash_x_char(&mut chars) { - break; - } - } - Some((_, 'n')) | Some((_, 'r')) | Some((_, 't')) | Some((_, '\\')) - | Some((_, '\'')) | Some((_, '"')) | Some((_, '0')) => {} - Some((_, 'u')) => { - if !backslash_u(&mut chars) { - break; - } - } - Some((_, '\n')) | Some((_, '\r')) => { - while let Some(&(_, ch)) = chars.peek() { - if ch.is_whitespace() { - chars.next(); - } else { - break; - } - } - } - _ => break, - }, - _ch => {} - } - } - Err(LexError) -} - -named!(byte_string -> (), alt!( - delimited!( - punct!("b\""), - cooked_byte_string, - tag!("\"") - ) => { |_| () } - | - preceded!( - punct!("br"), - raw_string - ) => { |_| () } -)); - -fn cooked_byte_string(mut input: Cursor) -> PResult<()> { - let mut bytes = input.bytes().enumerate(); - 'outer: while let Some((offset, b)) = bytes.next() { - match b { - b'"' => { - return Ok((input.advance(offset), ())); - } - b'\r' => { - if let Some((_, b'\n')) = bytes.next() { - // ... 
- } else { - break; - } - } - b'\\' => match bytes.next() { - Some((_, b'x')) => { - if !backslash_x_byte(&mut bytes) { - break; - } - } - Some((_, b'n')) | Some((_, b'r')) | Some((_, b't')) | Some((_, b'\\')) - | Some((_, b'0')) | Some((_, b'\'')) | Some((_, b'"')) => {} - Some((newline, b'\n')) | Some((newline, b'\r')) => { - let rest = input.advance(newline + 1); - for (offset, ch) in rest.char_indices() { - if !ch.is_whitespace() { - input = rest.advance(offset); - bytes = input.bytes().enumerate(); - continue 'outer; - } - } - break; - } - _ => break, - }, - b if b < 0x80 => {} - _ => break, - } - } - Err(LexError) -} - -fn raw_string(input: Cursor) -> PResult<()> { - let mut chars = input.char_indices(); - let mut n = 0; - while let Some((byte_offset, ch)) = chars.next() { - match ch { - '"' => { - n = byte_offset; - break; - } - '#' => {} - _ => return Err(LexError), - } - } - for (byte_offset, ch) in chars { - match ch { - '"' if input.advance(byte_offset + 1).starts_with(&input.rest[..n]) => { - let rest = input.advance(byte_offset + 1 + n); - return Ok((rest, ())); - } - '\r' => {} - _ => {} - } - } - Err(LexError) -} - -named!(byte -> (), do_parse!( - punct!("b") >> - tag!("'") >> - cooked_byte >> - tag!("'") >> - (()) -)); - -fn cooked_byte(input: Cursor) -> PResult<()> { - let mut bytes = input.bytes().enumerate(); - let ok = match bytes.next().map(|(_, b)| b) { - Some(b'\\') => match bytes.next().map(|(_, b)| b) { - Some(b'x') => backslash_x_byte(&mut bytes), - Some(b'n') | Some(b'r') | Some(b't') | Some(b'\\') | Some(b'0') | Some(b'\'') - | Some(b'"') => true, - _ => false, - }, - b => b.is_some(), - }; - if ok { - match bytes.next() { - Some((offset, _)) => { - if input.chars().as_str().is_char_boundary(offset) { - Ok((input.advance(offset), ())) - } else { - Err(LexError) - } - } - None => Ok((input.advance(input.len()), ())), - } - } else { - Err(LexError) - } -} - -named!(character -> (), do_parse!( - punct!("'") >> - cooked_char >> - tag!("'") >> - (()) -)); - -fn cooked_char(input: Cursor) -> PResult<()> { - let mut chars = input.char_indices(); - let ok = match chars.next().map(|(_, ch)| ch) { - Some('\\') => match chars.next().map(|(_, ch)| ch) { - Some('x') => backslash_x_char(&mut chars), - Some('u') => backslash_u(&mut chars), - Some('n') | Some('r') | Some('t') | Some('\\') | Some('0') | Some('\'') | Some('"') => { - true - } - _ => false, - }, - ch => ch.is_some(), - }; - if ok { - match chars.next() { - Some((idx, _)) => Ok((input.advance(idx), ())), - None => Ok((input.advance(input.len()), ())), - } - } else { - Err(LexError) - } -} - -macro_rules! 
next_ch { - ($chars:ident @ $pat:pat $(| $rest:pat)*) => { - match $chars.next() { - Some((_, ch)) => match ch { - $pat $(| $rest)* => ch, - _ => return false, - }, - None => return false - } - }; -} - -fn backslash_x_char(chars: &mut I) -> bool -where - I: Iterator, -{ - next_ch!(chars @ '0'...'7'); - next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F'); - true -} - -fn backslash_x_byte(chars: &mut I) -> bool -where - I: Iterator, -{ - next_ch!(chars @ b'0'...b'9' | b'a'...b'f' | b'A'...b'F'); - next_ch!(chars @ b'0'...b'9' | b'a'...b'f' | b'A'...b'F'); - true -} - -fn backslash_u(chars: &mut I) -> bool -where - I: Iterator, -{ - next_ch!(chars @ '{'); - next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F'); - loop { - let c = next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F' | '_' | '}'); - if c == '}' { - return true; - } - } -} - -fn float(input: Cursor) -> PResult<()> { - let (rest, ()) = float_digits(input)?; - for suffix in &["f32", "f64"] { - if rest.starts_with(suffix) { - return word_break(rest.advance(suffix.len())); - } - } - word_break(rest) -} - -fn float_digits(input: Cursor) -> PResult<()> { - let mut chars = input.chars().peekable(); - match chars.next() { - Some(ch) if ch >= '0' && ch <= '9' => {} - _ => return Err(LexError), - } - - let mut len = 1; - let mut has_dot = false; - let mut has_exp = false; - while let Some(&ch) = chars.peek() { - match ch { - '0'...'9' | '_' => { - chars.next(); - len += 1; - } - '.' => { - if has_dot { - break; - } - chars.next(); - if chars - .peek() - .map(|&ch| ch == '.' || UnicodeXID::is_xid_start(ch)) - .unwrap_or(false) - { - return Err(LexError); - } - len += 1; - has_dot = true; - } - 'e' | 'E' => { - chars.next(); - len += 1; - has_exp = true; - break; - } - _ => break, - } - } - - let rest = input.advance(len); - if !(has_dot || has_exp || rest.starts_with("f32") || rest.starts_with("f64")) { - return Err(LexError); - } - - if has_exp { - let mut has_exp_value = false; - while let Some(&ch) = chars.peek() { - match ch { - '+' | '-' => { - if has_exp_value { - break; - } - chars.next(); - len += 1; - } - '0'...'9' => { - chars.next(); - len += 1; - has_exp_value = true; - } - '_' => { - chars.next(); - len += 1; - } - _ => break, - } - } - if !has_exp_value { - return Err(LexError); - } - } - - Ok((input.advance(len), ())) -} - -fn int(input: Cursor) -> PResult<()> { - let (rest, ()) = digits(input)?; - for suffix in &[ - "isize", "i8", "i16", "i32", "i64", "i128", "usize", "u8", "u16", "u32", "u64", "u128", - ] { - if rest.starts_with(suffix) { - return word_break(rest.advance(suffix.len())); - } - } - word_break(rest) -} - -fn digits(mut input: Cursor) -> PResult<()> { - let base = if input.starts_with("0x") { - input = input.advance(2); - 16 - } else if input.starts_with("0o") { - input = input.advance(2); - 8 - } else if input.starts_with("0b") { - input = input.advance(2); - 2 - } else { - 10 - }; - - let mut len = 0; - let mut empty = true; - for b in input.bytes() { - let digit = match b { - b'0'...b'9' => (b - b'0') as u64, - b'a'...b'f' => 10 + (b - b'a') as u64, - b'A'...b'F' => 10 + (b - b'A') as u64, - b'_' => { - if empty && base == 10 { - return Err(LexError); - } - len += 1; - continue; - } - _ => break, - }; - if digit >= base { - return Err(LexError); - } - len += 1; - empty = false; - } - if empty { - Err(LexError) - } else { - Ok((input.advance(len), ())) - } -} - -fn op(input: Cursor) -> PResult { - let input = skip_whitespace(input); - match op_char(input) { - Ok((rest, '\'')) => { - symbol(rest)?; - Ok((rest, 
Punct::new('\'', Spacing::Joint))) - } - Ok((rest, ch)) => { - let kind = match op_char(rest) { - Ok(_) => Spacing::Joint, - Err(LexError) => Spacing::Alone, - }; - Ok((rest, Punct::new(ch, kind))) - } - Err(LexError) => Err(LexError), - } -} - -fn op_char(input: Cursor) -> PResult { - if input.starts_with("//") || input.starts_with("/*") { - // Do not accept `/` of a comment as an op. - return Err(LexError); - } - - let mut chars = input.chars(); - let first = match chars.next() { - Some(ch) => ch, - None => { - return Err(LexError); - } - }; - let recognized = "~!@#$%^&*-=+|;:,<.>/?'"; - if recognized.contains(first) { - Ok((input.advance(first.len_utf8()), first)) - } else { - Err(LexError) - } -} - -fn doc_comment(input: Cursor) -> PResult> { - let mut trees = Vec::new(); - let (rest, ((comment, inner), span)) = spanned(input, doc_comment_contents)?; - trees.push(TokenTree::Punct(Punct::new('#', Spacing::Alone))); - if inner { - trees.push(Punct::new('!', Spacing::Alone).into()); - } - let mut stream = vec![ - TokenTree::Ident(::Ident::new("doc", span)), - TokenTree::Punct(Punct::new('=', Spacing::Alone)), - TokenTree::Literal(::Literal::string(comment)), - ]; - for tt in stream.iter_mut() { - tt.set_span(span); - } - trees.push(Group::new(Delimiter::Bracket, stream.into_iter().collect()).into()); - for tt in trees.iter_mut() { - tt.set_span(span); - } - Ok((rest, trees)) -} - -named!(doc_comment_contents -> (&str, bool), alt!( - do_parse!( - punct!("//!") >> - s: take_until_newline_or_eof!() >> - ((s, true)) - ) - | - do_parse!( - option!(whitespace) >> - peek!(tag!("/*!")) >> - s: block_comment >> - ((s, true)) - ) - | - do_parse!( - punct!("///") >> - not!(tag!("/")) >> - s: take_until_newline_or_eof!() >> - ((s, false)) - ) - | - do_parse!( - option!(whitespace) >> - peek!(tuple!(tag!("/**"), not!(tag!("*")))) >> - s: block_comment >> - ((s, false)) - ) -)); --- a/vendor/proc-macro2/src/strnom.rs +++ b/vendor/proc-macro2/src/strnom.rs @@ -1,26 +1,24 @@ //! Adapted from [`nom`](https://github.com/Geal/nom). 
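The `doc_comment` parser at the end of the deleted file explains how doc comments survive the round trip: `/// text` is lexed into a `#` punct followed by a bracketed `doc = " text"` group. A quick sketch that makes this visible with the fallback parser:

    use proc_macro2::TokenStream;

    fn main() {
        let tokens: TokenStream = "/// hello\nfn f() {}".parse().unwrap();
        // Prints the doc comment as an ordinary attribute, roughly:
        // # [ doc = " hello" ] fn f ( ) { }
        println!("{}", tokens);
    }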
+use crate::fallback::LexError; use std::str::{Bytes, CharIndices, Chars}; - use unicode_xid::UnicodeXID; -use stable::LexError; - #[derive(Copy, Clone, Eq, PartialEq)] pub struct Cursor<'a> { pub rest: &'a str, - #[cfg(procmacro2_semver_exempt)] + #[cfg(span_locations)] pub off: u32, } impl<'a> Cursor<'a> { - #[cfg(not(procmacro2_semver_exempt))] + #[cfg(not(span_locations))] pub fn advance(&self, amt: usize) -> Cursor<'a> { Cursor { rest: &self.rest[amt..], } } - #[cfg(procmacro2_semver_exempt)] + #[cfg(span_locations)] pub fn advance(&self, amt: usize) -> Cursor<'a> { Cursor { rest: &self.rest[amt..], @@ -95,7 +93,7 @@ pub fn whitespace(input: Cursor) -> PRes } } match bytes[i] { - b' ' | 0x09...0x0d => { + b' ' | 0x09..=0x0d => { i += 1; continue; } --- a/vendor/proc-macro2/src/unstable.rs +++ /dev/null @@ -1,836 +0,0 @@ -#![cfg_attr(not(super_unstable), allow(dead_code))] - -use std::fmt; -use std::iter; -use std::panic::{self, PanicInfo}; -use std::str::FromStr; - -use proc_macro; -use stable; - -use {Delimiter, Group, Punct, Spacing, TokenTree}; - -#[derive(Clone)] -pub enum TokenStream { - Nightly(proc_macro::TokenStream), - Stable(stable::TokenStream), -} - -pub enum LexError { - Nightly(proc_macro::LexError), - Stable(stable::LexError), -} - -fn nightly_works() -> bool { - use std::sync::atomic::*; - use std::sync::Once; - - static WORKS: AtomicUsize = ATOMIC_USIZE_INIT; - static INIT: Once = Once::new(); - - match WORKS.load(Ordering::SeqCst) { - 1 => return false, - 2 => return true, - _ => {} - } - - // Swap in a null panic hook to avoid printing "thread panicked" to stderr, - // then use catch_unwind to determine whether the compiler's proc_macro is - // working. When proc-macro2 is used from outside of a procedural macro all - // of the proc_macro crate's APIs currently panic. - // - // The Once is to prevent the possibility of this ordering: - // - // thread 1 calls take_hook, gets the user's original hook - // thread 1 calls set_hook with the null hook - // thread 2 calls take_hook, thinks null hook is the original hook - // thread 2 calls set_hook with the null hook - // thread 1 calls set_hook with the actual original hook - // thread 2 calls set_hook with what it thinks is the original hook - // - // in which the user's hook has been lost. - // - // There is still a race condition where a panic in a different thread can - // happen during the interval that the user's original panic hook is - // unregistered such that their hook is incorrectly not called. This is - // sufficiently unlikely and less bad than printing panic messages to stderr - // on correct use of this crate. Maybe there is a libstd feature request - // here. For now, if a user needs to guarantee that this failure mode does - // not occur, they need to call e.g. `proc_macro2::Span::call_site()` from - // the main thread before launching any other threads. - INIT.call_once(|| { - type PanicHook = Fn(&PanicInfo) + Sync + Send + 'static; - - let null_hook: Box = Box::new(|_panic_info| { /* ignore */ }); - let sanity_check = &*null_hook as *const PanicHook; - let original_hook = panic::take_hook(); - panic::set_hook(null_hook); - - let works = panic::catch_unwind(|| proc_macro::Span::call_site()).is_ok(); - WORKS.store(works as usize + 1, Ordering::SeqCst); - - let hopefully_null_hook = panic::take_hook(); - panic::set_hook(original_hook); - if sanity_check != &*hopefully_null_hook { - panic!("observed race condition in proc_macro2::nightly_works"); - } - }); - nightly_works() -} - -fn mismatch() -> ! 
{ - panic!("stable/nightly mismatch") -} - -impl TokenStream { - pub fn new() -> TokenStream { - if nightly_works() { - TokenStream::Nightly(proc_macro::TokenStream::new()) - } else { - TokenStream::Stable(stable::TokenStream::new()) - } - } - - pub fn is_empty(&self) -> bool { - match self { - TokenStream::Nightly(tts) => tts.is_empty(), - TokenStream::Stable(tts) => tts.is_empty(), - } - } - - fn unwrap_nightly(self) -> proc_macro::TokenStream { - match self { - TokenStream::Nightly(s) => s, - TokenStream::Stable(_) => mismatch(), - } - } - - fn unwrap_stable(self) -> stable::TokenStream { - match self { - TokenStream::Nightly(_) => mismatch(), - TokenStream::Stable(s) => s, - } - } -} - -impl FromStr for TokenStream { - type Err = LexError; - - fn from_str(src: &str) -> Result { - if nightly_works() { - Ok(TokenStream::Nightly(src.parse()?)) - } else { - Ok(TokenStream::Stable(src.parse()?)) - } - } -} - -impl fmt::Display for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - TokenStream::Nightly(tts) => tts.fmt(f), - TokenStream::Stable(tts) => tts.fmt(f), - } - } -} - -impl From for TokenStream { - fn from(inner: proc_macro::TokenStream) -> TokenStream { - TokenStream::Nightly(inner) - } -} - -impl From for proc_macro::TokenStream { - fn from(inner: TokenStream) -> proc_macro::TokenStream { - match inner { - TokenStream::Nightly(inner) => inner, - TokenStream::Stable(inner) => inner.to_string().parse().unwrap(), - } - } -} - -impl From for TokenStream { - fn from(inner: stable::TokenStream) -> TokenStream { - TokenStream::Stable(inner) - } -} - -impl From for TokenStream { - fn from(token: TokenTree) -> TokenStream { - if !nightly_works() { - return TokenStream::Stable(token.into()); - } - let tt: proc_macro::TokenTree = match token { - TokenTree::Group(tt) => { - let delim = match tt.delimiter() { - Delimiter::Parenthesis => proc_macro::Delimiter::Parenthesis, - Delimiter::Bracket => proc_macro::Delimiter::Bracket, - Delimiter::Brace => proc_macro::Delimiter::Brace, - Delimiter::None => proc_macro::Delimiter::None, - }; - let span = tt.span(); - let mut group = proc_macro::Group::new(delim, tt.stream.inner.unwrap_nightly()); - group.set_span(span.inner.unwrap_nightly()); - group.into() - } - TokenTree::Punct(tt) => { - let spacing = match tt.spacing() { - Spacing::Joint => proc_macro::Spacing::Joint, - Spacing::Alone => proc_macro::Spacing::Alone, - }; - let mut op = proc_macro::Punct::new(tt.as_char(), spacing); - op.set_span(tt.span().inner.unwrap_nightly()); - op.into() - } - TokenTree::Ident(tt) => tt.inner.unwrap_nightly().into(), - TokenTree::Literal(tt) => tt.inner.unwrap_nightly().into(), - }; - TokenStream::Nightly(tt.into()) - } -} - -impl iter::FromIterator for TokenStream { - fn from_iter>(trees: I) -> Self { - if nightly_works() { - let trees = trees - .into_iter() - .map(TokenStream::from) - .flat_map(|t| match t { - TokenStream::Nightly(s) => s, - TokenStream::Stable(_) => mismatch(), - }); - TokenStream::Nightly(trees.collect()) - } else { - TokenStream::Stable(trees.into_iter().collect()) - } - } -} - -impl iter::FromIterator for TokenStream { - fn from_iter>(streams: I) -> Self { - let mut streams = streams.into_iter(); - match streams.next() { - #[cfg(slow_extend)] - Some(TokenStream::Nightly(first)) => { - let stream = iter::once(first).chain(streams.map(|s| { - match s { - TokenStream::Nightly(s) => s, - TokenStream::Stable(_) => mismatch(), - } - })).collect(); - TokenStream::Nightly(stream) - } - #[cfg(not(slow_extend))] - 
Some(TokenStream::Nightly(mut first)) => { - first.extend(streams.map(|s| { - match s { - TokenStream::Nightly(s) => s, - TokenStream::Stable(_) => mismatch(), - } - })); - TokenStream::Nightly(first) - } - Some(TokenStream::Stable(mut first)) => { - first.extend(streams.map(|s| { - match s { - TokenStream::Stable(s) => s, - TokenStream::Nightly(_) => mismatch(), - } - })); - TokenStream::Stable(first) - } - None => TokenStream::new(), - - } - } -} - -impl Extend for TokenStream { - fn extend>(&mut self, streams: I) { - match self { - TokenStream::Nightly(tts) => { - #[cfg(not(slow_extend))] - { - tts.extend( - streams - .into_iter() - .map(|t| TokenStream::from(t).unwrap_nightly()), - ); - } - #[cfg(slow_extend)] - { - *tts = tts - .clone() - .into_iter() - .chain( - streams - .into_iter() - .map(TokenStream::from) - .flat_map(|t| match t { - TokenStream::Nightly(tts) => tts.into_iter(), - _ => mismatch(), - }), - ).collect(); - } - } - TokenStream::Stable(tts) => tts.extend(streams), - } - } -} - -impl Extend for TokenStream { - fn extend>(&mut self, streams: I) { - match self { - TokenStream::Nightly(tts) => { - #[cfg(not(slow_extend))] - { - tts.extend(streams.into_iter().map(|stream| stream.unwrap_nightly())); - } - #[cfg(slow_extend)] - { - *tts = tts - .clone() - .into_iter() - .chain( - streams - .into_iter() - .flat_map(|t| match t { - TokenStream::Nightly(tts) => tts.into_iter(), - _ => mismatch(), - }), - ).collect(); - } - } - TokenStream::Stable(tts) => { - tts.extend(streams.into_iter().map(|stream| stream.unwrap_stable())) - } - } - } -} - -impl fmt::Debug for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - TokenStream::Nightly(tts) => tts.fmt(f), - TokenStream::Stable(tts) => tts.fmt(f), - } - } -} - -impl From for LexError { - fn from(e: proc_macro::LexError) -> LexError { - LexError::Nightly(e) - } -} - -impl From for LexError { - fn from(e: stable::LexError) -> LexError { - LexError::Stable(e) - } -} - -impl fmt::Debug for LexError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - LexError::Nightly(e) => e.fmt(f), - LexError::Stable(e) => e.fmt(f), - } - } -} - -pub enum TokenTreeIter { - Nightly(proc_macro::token_stream::IntoIter), - Stable(stable::TokenTreeIter), -} - -impl IntoIterator for TokenStream { - type Item = TokenTree; - type IntoIter = TokenTreeIter; - - fn into_iter(self) -> TokenTreeIter { - match self { - TokenStream::Nightly(tts) => TokenTreeIter::Nightly(tts.into_iter()), - TokenStream::Stable(tts) => TokenTreeIter::Stable(tts.into_iter()), - } - } -} - -impl Iterator for TokenTreeIter { - type Item = TokenTree; - - fn next(&mut self) -> Option { - let token = match self { - TokenTreeIter::Nightly(iter) => iter.next()?, - TokenTreeIter::Stable(iter) => return iter.next(), - }; - Some(match token { - proc_macro::TokenTree::Group(tt) => { - let delim = match tt.delimiter() { - proc_macro::Delimiter::Parenthesis => Delimiter::Parenthesis, - proc_macro::Delimiter::Bracket => Delimiter::Bracket, - proc_macro::Delimiter::Brace => Delimiter::Brace, - proc_macro::Delimiter::None => Delimiter::None, - }; - let stream = ::TokenStream::_new(TokenStream::Nightly(tt.stream())); - let mut g = Group::new(delim, stream); - g.set_span(::Span::_new(Span::Nightly(tt.span()))); - g.into() - } - proc_macro::TokenTree::Punct(tt) => { - let spacing = match tt.spacing() { - proc_macro::Spacing::Joint => Spacing::Joint, - proc_macro::Spacing::Alone => Spacing::Alone, - }; - let mut o = Punct::new(tt.as_char(), 
spacing); - o.set_span(::Span::_new(Span::Nightly(tt.span()))); - o.into() - } - proc_macro::TokenTree::Ident(s) => ::Ident::_new(Ident::Nightly(s)).into(), - proc_macro::TokenTree::Literal(l) => ::Literal::_new(Literal::Nightly(l)).into(), - }) - } - - fn size_hint(&self) -> (usize, Option) { - match self { - TokenTreeIter::Nightly(tts) => tts.size_hint(), - TokenTreeIter::Stable(tts) => tts.size_hint(), - } - } -} - -impl fmt::Debug for TokenTreeIter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("TokenTreeIter").finish() - } -} - -pub use stable::FileName; - -// NOTE: We have to generate our own filename object here because we can't wrap -// the one provided by proc_macro. -#[derive(Clone, PartialEq, Eq)] -#[cfg(super_unstable)] -pub enum SourceFile { - Nightly(proc_macro::SourceFile, FileName), - Stable(stable::SourceFile), -} - -#[cfg(super_unstable)] -impl SourceFile { - fn nightly(sf: proc_macro::SourceFile) -> Self { - let filename = stable::file_name(sf.path().display().to_string()); - SourceFile::Nightly(sf, filename) - } - - /// Get the path to this source file as a string. - pub fn path(&self) -> &FileName { - match self { - SourceFile::Nightly(_, f) => f, - SourceFile::Stable(a) => a.path(), - } - } - - pub fn is_real(&self) -> bool { - match self { - SourceFile::Nightly(a, _) => a.is_real(), - SourceFile::Stable(a) => a.is_real(), - } - } -} - -#[cfg(super_unstable)] -impl AsRef for SourceFile { - fn as_ref(&self) -> &FileName { - self.path() - } -} - -#[cfg(super_unstable)] -impl fmt::Debug for SourceFile { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - SourceFile::Nightly(a, _) => a.fmt(f), - SourceFile::Stable(a) => a.fmt(f), - } - } -} - -pub struct LineColumn { - pub line: usize, - pub column: usize, -} - -#[derive(Copy, Clone)] -pub enum Span { - Nightly(proc_macro::Span), - Stable(stable::Span), -} - -impl Span { - pub fn call_site() -> Span { - if nightly_works() { - Span::Nightly(proc_macro::Span::call_site()) - } else { - Span::Stable(stable::Span::call_site()) - } - } - - #[cfg(super_unstable)] - pub fn def_site() -> Span { - if nightly_works() { - Span::Nightly(proc_macro::Span::def_site()) - } else { - Span::Stable(stable::Span::def_site()) - } - } - - #[cfg(super_unstable)] - pub fn resolved_at(&self, other: Span) -> Span { - match (self, other) { - (Span::Nightly(a), Span::Nightly(b)) => Span::Nightly(a.resolved_at(b)), - (Span::Stable(a), Span::Stable(b)) => Span::Stable(a.resolved_at(b)), - _ => mismatch(), - } - } - - #[cfg(super_unstable)] - pub fn located_at(&self, other: Span) -> Span { - match (self, other) { - (Span::Nightly(a), Span::Nightly(b)) => Span::Nightly(a.located_at(b)), - (Span::Stable(a), Span::Stable(b)) => Span::Stable(a.located_at(b)), - _ => mismatch(), - } - } - - pub fn unstable(self) -> proc_macro::Span { - match self { - Span::Nightly(s) => s, - Span::Stable(_) => mismatch(), - } - } - - #[cfg(super_unstable)] - pub fn source_file(&self) -> SourceFile { - match self { - Span::Nightly(s) => SourceFile::nightly(s.source_file()), - Span::Stable(s) => SourceFile::Stable(s.source_file()), - } - } - - #[cfg(super_unstable)] - pub fn start(&self) -> LineColumn { - match self { - Span::Nightly(s) => { - let proc_macro::LineColumn { line, column } = s.start(); - LineColumn { line, column } - } - Span::Stable(s) => { - let stable::LineColumn { line, column } = s.start(); - LineColumn { line, column } - } - } - } - - #[cfg(super_unstable)] - pub fn end(&self) -> LineColumn { - match self { 
- Span::Nightly(s) => { - let proc_macro::LineColumn { line, column } = s.end(); - LineColumn { line, column } - } - Span::Stable(s) => { - let stable::LineColumn { line, column } = s.end(); - LineColumn { line, column } - } - } - } - - #[cfg(super_unstable)] - pub fn join(&self, other: Span) -> Option { - let ret = match (self, other) { - (Span::Nightly(a), Span::Nightly(b)) => Span::Nightly(a.join(b)?), - (Span::Stable(a), Span::Stable(b)) => Span::Stable(a.join(b)?), - _ => return None, - }; - Some(ret) - } - - #[cfg(super_unstable)] - pub fn eq(&self, other: &Span) -> bool { - match (self, other) { - (Span::Nightly(a), Span::Nightly(b)) => a.eq(b), - (Span::Stable(a), Span::Stable(b)) => a.eq(b), - _ => false, - } - } - - fn unwrap_nightly(self) -> proc_macro::Span { - match self { - Span::Nightly(s) => s, - Span::Stable(_) => mismatch(), - } - } -} - -impl From for ::Span { - fn from(proc_span: proc_macro::Span) -> ::Span { - ::Span::_new(Span::Nightly(proc_span)) - } -} - -impl From for Span { - fn from(inner: stable::Span) -> Span { - Span::Stable(inner) - } -} - -impl fmt::Debug for Span { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Span::Nightly(s) => s.fmt(f), - Span::Stable(s) => s.fmt(f), - } - } -} - -#[derive(Clone)] -pub enum Ident { - Nightly(proc_macro::Ident), - Stable(stable::Ident), -} - -impl Ident { - pub fn new(string: &str, span: Span) -> Ident { - match span { - Span::Nightly(s) => Ident::Nightly(proc_macro::Ident::new(string, s)), - Span::Stable(s) => Ident::Stable(stable::Ident::new(string, s)), - } - } - - pub fn new_raw(string: &str, span: Span) -> Ident { - match span { - Span::Nightly(s) => { - let p: proc_macro::TokenStream = string.parse().unwrap(); - let ident = match p.into_iter().next() { - Some(proc_macro::TokenTree::Ident(mut i)) => { - i.set_span(s); - i - } - _ => panic!(), - }; - Ident::Nightly(ident) - } - Span::Stable(s) => Ident::Stable(stable::Ident::new_raw(string, s)), - } - } - - pub fn span(&self) -> Span { - match self { - Ident::Nightly(t) => Span::Nightly(t.span()), - Ident::Stable(t) => Span::Stable(t.span()), - } - } - - pub fn set_span(&mut self, span: Span) { - match (self, span) { - (Ident::Nightly(t), Span::Nightly(s)) => t.set_span(s), - (Ident::Stable(t), Span::Stable(s)) => t.set_span(s), - _ => mismatch(), - } - } - - fn unwrap_nightly(self) -> proc_macro::Ident { - match self { - Ident::Nightly(s) => s, - Ident::Stable(_) => mismatch(), - } - } -} - -impl PartialEq for Ident { - fn eq(&self, other: &Ident) -> bool { - match (self, other) { - (Ident::Nightly(t), Ident::Nightly(o)) => t.to_string() == o.to_string(), - (Ident::Stable(t), Ident::Stable(o)) => t == o, - _ => mismatch(), - } - } -} - -impl PartialEq for Ident -where - T: ?Sized + AsRef, -{ - fn eq(&self, other: &T) -> bool { - let other = other.as_ref(); - match self { - Ident::Nightly(t) => t.to_string() == other, - Ident::Stable(t) => t == other, - } - } -} - -impl fmt::Display for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Ident::Nightly(t) => t.fmt(f), - Ident::Stable(t) => t.fmt(f), - } - } -} - -impl fmt::Debug for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Ident::Nightly(t) => t.fmt(f), - Ident::Stable(t) => t.fmt(f), - } - } -} - -#[derive(Clone)] -pub enum Literal { - Nightly(proc_macro::Literal), - Stable(stable::Literal), -} - -macro_rules! 
suffixed_numbers { - ($($name:ident => $kind:ident,)*) => ($( - pub fn $name(n: $kind) -> Literal { - if nightly_works() { - Literal::Nightly(proc_macro::Literal::$name(n)) - } else { - Literal::Stable(stable::Literal::$name(n)) - } - } - )*) -} - -macro_rules! unsuffixed_integers { - ($($name:ident => $kind:ident,)*) => ($( - pub fn $name(n: $kind) -> Literal { - if nightly_works() { - Literal::Nightly(proc_macro::Literal::$name(n)) - } else { - Literal::Stable(stable::Literal::$name(n)) - } - } - )*) -} - -impl Literal { - suffixed_numbers! { - u8_suffixed => u8, - u16_suffixed => u16, - u32_suffixed => u32, - u64_suffixed => u64, - usize_suffixed => usize, - i8_suffixed => i8, - i16_suffixed => i16, - i32_suffixed => i32, - i64_suffixed => i64, - isize_suffixed => isize, - - f32_suffixed => f32, - f64_suffixed => f64, - } - - unsuffixed_integers! { - u8_unsuffixed => u8, - u16_unsuffixed => u16, - u32_unsuffixed => u32, - u64_unsuffixed => u64, - usize_unsuffixed => usize, - i8_unsuffixed => i8, - i16_unsuffixed => i16, - i32_unsuffixed => i32, - i64_unsuffixed => i64, - isize_unsuffixed => isize, - } - - pub fn f32_unsuffixed(f: f32) -> Literal { - if nightly_works() { - Literal::Nightly(proc_macro::Literal::f32_unsuffixed(f)) - } else { - Literal::Stable(stable::Literal::f32_unsuffixed(f)) - } - } - - pub fn f64_unsuffixed(f: f64) -> Literal { - if nightly_works() { - Literal::Nightly(proc_macro::Literal::f64_unsuffixed(f)) - } else { - Literal::Stable(stable::Literal::f64_unsuffixed(f)) - } - } - - pub fn string(t: &str) -> Literal { - if nightly_works() { - Literal::Nightly(proc_macro::Literal::string(t)) - } else { - Literal::Stable(stable::Literal::string(t)) - } - } - - pub fn character(t: char) -> Literal { - if nightly_works() { - Literal::Nightly(proc_macro::Literal::character(t)) - } else { - Literal::Stable(stable::Literal::character(t)) - } - } - - pub fn byte_string(bytes: &[u8]) -> Literal { - if nightly_works() { - Literal::Nightly(proc_macro::Literal::byte_string(bytes)) - } else { - Literal::Stable(stable::Literal::byte_string(bytes)) - } - } - - pub fn span(&self) -> Span { - match self { - Literal::Nightly(lit) => Span::Nightly(lit.span()), - Literal::Stable(lit) => Span::Stable(lit.span()), - } - } - - pub fn set_span(&mut self, span: Span) { - match (self, span) { - (Literal::Nightly(lit), Span::Nightly(s)) => lit.set_span(s), - (Literal::Stable(lit), Span::Stable(s)) => lit.set_span(s), - _ => mismatch(), - } - } - - fn unwrap_nightly(self) -> proc_macro::Literal { - match self { - Literal::Nightly(s) => s, - Literal::Stable(_) => mismatch(), - } - } -} - -impl From for Literal { - fn from(s: stable::Literal) -> Literal { - Literal::Stable(s) - } -} - -impl fmt::Display for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Literal::Nightly(t) => t.fmt(f), - Literal::Stable(t) => t.fmt(f), - } - } -} - -impl fmt::Debug for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Literal::Nightly(t) => t.fmt(f), - Literal::Stable(t) => t.fmt(f), - } - } -} --- /dev/null +++ b/vendor/proc-macro2/src/wrapper.rs @@ -0,0 +1,927 @@ +use std::fmt; +use std::iter; +use std::ops::RangeBounds; +use std::panic::{self, PanicInfo}; +#[cfg(super_unstable)] +use std::path::PathBuf; +use std::str::FromStr; + +use crate::{fallback, Delimiter, Punct, Spacing, TokenTree}; + +#[derive(Clone)] +pub enum TokenStream { + Compiler(DeferredTokenStream), + Fallback(fallback::TokenStream), +} + +// Work around 
https://github.com/rust-lang/rust/issues/65080. +// In `impl Extend<TokenTree> for TokenStream` which is used heavily by quote, +// we hold on to the appended tokens and do proc_macro::TokenStream::extend as +// late as possible to batch together consecutive uses of the Extend impl. +#[derive(Clone)] +pub struct DeferredTokenStream { + stream: proc_macro::TokenStream, + extra: Vec<proc_macro::TokenTree>, +} + +pub enum LexError { + Compiler(proc_macro::LexError), + Fallback(fallback::LexError), +} + +fn nightly_works() -> bool { + use std::sync::atomic::*; + use std::sync::Once; + + static WORKS: AtomicUsize = AtomicUsize::new(0); + static INIT: Once = Once::new(); + + match WORKS.load(Ordering::SeqCst) { + 1 => return false, + 2 => return true, + _ => {} + } + + // Swap in a null panic hook to avoid printing "thread panicked" to stderr, + // then use catch_unwind to determine whether the compiler's proc_macro is + // working. When proc-macro2 is used from outside of a procedural macro all + // of the proc_macro crate's APIs currently panic. + // + // The Once is to prevent the possibility of this ordering: + // + // thread 1 calls take_hook, gets the user's original hook + // thread 1 calls set_hook with the null hook + // thread 2 calls take_hook, thinks null hook is the original hook + // thread 2 calls set_hook with the null hook + // thread 1 calls set_hook with the actual original hook + // thread 2 calls set_hook with what it thinks is the original hook + // + // in which the user's hook has been lost. + // + // There is still a race condition where a panic in a different thread can + // happen during the interval that the user's original panic hook is + // unregistered such that their hook is incorrectly not called. This is + // sufficiently unlikely and less bad than printing panic messages to stderr + // on correct use of this crate. Maybe there is a libstd feature request + // here. For now, if a user needs to guarantee that this failure mode does + // not occur, they need to call e.g. `proc_macro2::Span::call_site()` from + // the main thread before launching any other threads. + INIT.call_once(|| { + type PanicHook = dyn Fn(&PanicInfo) + Sync + Send + 'static; + + let null_hook: Box<PanicHook> = Box::new(|_panic_info| { /* ignore */ }); + let sanity_check = &*null_hook as *const PanicHook; + let original_hook = panic::take_hook(); + panic::set_hook(null_hook); + + let works = panic::catch_unwind(|| proc_macro::Span::call_site()).is_ok(); + WORKS.store(works as usize + 1, Ordering::SeqCst); + + let hopefully_null_hook = panic::take_hook(); + panic::set_hook(original_hook); + if sanity_check != &*hopefully_null_hook { + panic!("observed race condition in proc_macro2::nightly_works"); + } + }); + nightly_works() +} + +fn mismatch() -> !
{ + panic!("stable/nightly mismatch") +} + +impl DeferredTokenStream { + fn new(stream: proc_macro::TokenStream) -> Self { + DeferredTokenStream { + stream, + extra: Vec::new(), + } + } + + fn is_empty(&self) -> bool { + self.stream.is_empty() && self.extra.is_empty() + } + + fn evaluate_now(&mut self) { + self.stream.extend(self.extra.drain(..)); + } + + fn into_token_stream(mut self) -> proc_macro::TokenStream { + self.evaluate_now(); + self.stream + } +} + +impl TokenStream { + pub fn new() -> TokenStream { + if nightly_works() { + TokenStream::Compiler(DeferredTokenStream::new(proc_macro::TokenStream::new())) + } else { + TokenStream::Fallback(fallback::TokenStream::new()) + } + } + + pub fn is_empty(&self) -> bool { + match self { + TokenStream::Compiler(tts) => tts.is_empty(), + TokenStream::Fallback(tts) => tts.is_empty(), + } + } + + fn unwrap_nightly(self) -> proc_macro::TokenStream { + match self { + TokenStream::Compiler(s) => s.into_token_stream(), + TokenStream::Fallback(_) => mismatch(), + } + } + + fn unwrap_stable(self) -> fallback::TokenStream { + match self { + TokenStream::Compiler(_) => mismatch(), + TokenStream::Fallback(s) => s, + } + } +} + +impl FromStr for TokenStream { + type Err = LexError; + + fn from_str(src: &str) -> Result<TokenStream, LexError> { + if nightly_works() { + Ok(TokenStream::Compiler(DeferredTokenStream::new( + src.parse()?, + ))) + } else { + Ok(TokenStream::Fallback(src.parse()?)) + } + } +} + +impl fmt::Display for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + TokenStream::Compiler(tts) => tts.clone().into_token_stream().fmt(f), + TokenStream::Fallback(tts) => tts.fmt(f), + } + } +} + +impl From<proc_macro::TokenStream> for TokenStream { + fn from(inner: proc_macro::TokenStream) -> TokenStream { + TokenStream::Compiler(DeferredTokenStream::new(inner)) + } +} + +impl From<TokenStream> for proc_macro::TokenStream { + fn from(inner: TokenStream) -> proc_macro::TokenStream { + match inner { + TokenStream::Compiler(inner) => inner.into_token_stream(), + TokenStream::Fallback(inner) => inner.to_string().parse().unwrap(), + } + } +} + +impl From<fallback::TokenStream> for TokenStream { + fn from(inner: fallback::TokenStream) -> TokenStream { + TokenStream::Fallback(inner) + } +} + +// Assumes nightly_works().
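+// Converts one of proc-macro2's own token trees into the compiler's token
+// tree. Every constituent piece is unwrapped with unwrap_nightly(), so a
+// fallback token mixed in here aborts via mismatch().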
+fn into_compiler_token(token: TokenTree) -> proc_macro::TokenTree { + match token { + TokenTree::Group(tt) => tt.inner.unwrap_nightly().into(), + TokenTree::Punct(tt) => { + let spacing = match tt.spacing() { + Spacing::Joint => proc_macro::Spacing::Joint, + Spacing::Alone => proc_macro::Spacing::Alone, + }; + let mut op = proc_macro::Punct::new(tt.as_char(), spacing); + op.set_span(tt.span().inner.unwrap_nightly()); + op.into() + } + TokenTree::Ident(tt) => tt.inner.unwrap_nightly().into(), + TokenTree::Literal(tt) => tt.inner.unwrap_nightly().into(), + } +} + +impl From<TokenTree> for TokenStream { + fn from(token: TokenTree) -> TokenStream { + if nightly_works() { + TokenStream::Compiler(DeferredTokenStream::new(into_compiler_token(token).into())) + } else { + TokenStream::Fallback(token.into()) + } + } +} + +impl iter::FromIterator<TokenTree> for TokenStream { + fn from_iter<I: IntoIterator<Item = TokenTree>>(trees: I) -> Self { + if nightly_works() { + TokenStream::Compiler(DeferredTokenStream::new( + trees.into_iter().map(into_compiler_token).collect(), + )) + } else { + TokenStream::Fallback(trees.into_iter().collect()) + } + } +} + +impl iter::FromIterator<TokenStream> for TokenStream { + fn from_iter<I: IntoIterator<Item = TokenStream>>(streams: I) -> Self { + let mut streams = streams.into_iter(); + match streams.next() { + Some(TokenStream::Compiler(mut first)) => { + first.evaluate_now(); + first.stream.extend(streams.map(|s| match s { + TokenStream::Compiler(s) => s.into_token_stream(), + TokenStream::Fallback(_) => mismatch(), + })); + TokenStream::Compiler(first) + } + Some(TokenStream::Fallback(mut first)) => { + first.extend(streams.map(|s| match s { + TokenStream::Fallback(s) => s, + TokenStream::Compiler(_) => mismatch(), + })); + TokenStream::Fallback(first) + } + None => TokenStream::new(), + } + } +} + +impl Extend<TokenTree> for TokenStream { + fn extend<I: IntoIterator<Item = TokenTree>>(&mut self, streams: I) { + match self { + TokenStream::Compiler(tts) => { + // Here is the reason for DeferredTokenStream.
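+ // Buffering the incoming tokens in `extra`, rather than extending
+ // the compiler's stream once per Extend call, lets evaluate_now()
+ // flush them to proc_macro in a single batch.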
+ tts.extra + .extend(streams.into_iter().map(into_compiler_token)); + } + TokenStream::Fallback(tts) => tts.extend(streams), + } + } +} + +impl Extend<TokenStream> for TokenStream { + fn extend<I: IntoIterator<Item = TokenStream>>(&mut self, streams: I) { + match self { + TokenStream::Compiler(tts) => { + tts.evaluate_now(); + tts.stream + .extend(streams.into_iter().map(|stream| stream.unwrap_nightly())); + } + TokenStream::Fallback(tts) => { + tts.extend(streams.into_iter().map(|stream| stream.unwrap_stable())); + } + } + } +} + +impl fmt::Debug for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + TokenStream::Compiler(tts) => tts.clone().into_token_stream().fmt(f), + TokenStream::Fallback(tts) => tts.fmt(f), + } + } +} + +impl From<proc_macro::LexError> for LexError { + fn from(e: proc_macro::LexError) -> LexError { + LexError::Compiler(e) + } +} + +impl From<fallback::LexError> for LexError { + fn from(e: fallback::LexError) -> LexError { + LexError::Fallback(e) + } +} + +impl fmt::Debug for LexError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + LexError::Compiler(e) => e.fmt(f), + LexError::Fallback(e) => e.fmt(f), + } + } +} + +#[derive(Clone)] +pub enum TokenTreeIter { + Compiler(proc_macro::token_stream::IntoIter), + Fallback(fallback::TokenTreeIter), +} + +impl IntoIterator for TokenStream { + type Item = TokenTree; + type IntoIter = TokenTreeIter; + + fn into_iter(self) -> TokenTreeIter { + match self { + TokenStream::Compiler(tts) => { + TokenTreeIter::Compiler(tts.into_token_stream().into_iter()) + } + TokenStream::Fallback(tts) => TokenTreeIter::Fallback(tts.into_iter()), + } + } +} + +impl Iterator for TokenTreeIter { + type Item = TokenTree; + + fn next(&mut self) -> Option<TokenTree> { + let token = match self { + TokenTreeIter::Compiler(iter) => iter.next()?, + TokenTreeIter::Fallback(iter) => return iter.next(), + }; + Some(match token { + proc_macro::TokenTree::Group(tt) => crate::Group::_new(Group::Compiler(tt)).into(), + proc_macro::TokenTree::Punct(tt) => { + let spacing = match tt.spacing() { + proc_macro::Spacing::Joint => Spacing::Joint, + proc_macro::Spacing::Alone => Spacing::Alone, + }; + let mut o = Punct::new(tt.as_char(), spacing); + o.set_span(crate::Span::_new(Span::Compiler(tt.span()))); + o.into() + } + proc_macro::TokenTree::Ident(s) => crate::Ident::_new(Ident::Compiler(s)).into(), + proc_macro::TokenTree::Literal(l) => crate::Literal::_new(Literal::Compiler(l)).into(), + }) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + match self { + TokenTreeIter::Compiler(tts) => tts.size_hint(), + TokenTreeIter::Fallback(tts) => tts.size_hint(), + } + } +} + +impl fmt::Debug for TokenTreeIter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("TokenTreeIter").finish() + } +} + +#[derive(Clone, PartialEq, Eq)] +#[cfg(super_unstable)] +pub enum SourceFile { + Compiler(proc_macro::SourceFile), + Fallback(fallback::SourceFile), +} + +#[cfg(super_unstable)] +impl SourceFile { + fn nightly(sf: proc_macro::SourceFile) -> Self { + SourceFile::Compiler(sf) + } + + /// Get the path to this source file as a string.
+ pub fn path(&self) -> PathBuf { + match self { + SourceFile::Compiler(a) => a.path(), + SourceFile::Fallback(a) => a.path(), + } + } + + pub fn is_real(&self) -> bool { + match self { + SourceFile::Compiler(a) => a.is_real(), + SourceFile::Fallback(a) => a.is_real(), + } + } +} + +#[cfg(super_unstable)] +impl fmt::Debug for SourceFile { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + SourceFile::Compiler(a) => a.fmt(f), + SourceFile::Fallback(a) => a.fmt(f), + } + } +} + +#[cfg(any(super_unstable, feature = "span-locations"))] +pub struct LineColumn { + pub line: usize, + pub column: usize, +} + +#[derive(Copy, Clone)] +pub enum Span { + Compiler(proc_macro::Span), + Fallback(fallback::Span), +} + +impl Span { + pub fn call_site() -> Span { + if nightly_works() { + Span::Compiler(proc_macro::Span::call_site()) + } else { + Span::Fallback(fallback::Span::call_site()) + } + } + + #[cfg(super_unstable)] + pub fn def_site() -> Span { + if nightly_works() { + Span::Compiler(proc_macro::Span::def_site()) + } else { + Span::Fallback(fallback::Span::def_site()) + } + } + + #[cfg(super_unstable)] + pub fn resolved_at(&self, other: Span) -> Span { + match (self, other) { + (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.resolved_at(b)), + (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.resolved_at(b)), + _ => mismatch(), + } + } + + #[cfg(super_unstable)] + pub fn located_at(&self, other: Span) -> Span { + match (self, other) { + (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.located_at(b)), + (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.located_at(b)), + _ => mismatch(), + } + } + + pub fn unwrap(self) -> proc_macro::Span { + match self { + Span::Compiler(s) => s, + Span::Fallback(_) => panic!("proc_macro::Span is only available in procedural macros"), + } + } + + #[cfg(super_unstable)] + pub fn source_file(&self) -> SourceFile { + match self { + Span::Compiler(s) => SourceFile::nightly(s.source_file()), + Span::Fallback(s) => SourceFile::Fallback(s.source_file()), + } + } + + #[cfg(any(super_unstable, feature = "span-locations"))] + pub fn start(&self) -> LineColumn { + match self { + #[cfg(proc_macro_span)] + Span::Compiler(s) => { + let proc_macro::LineColumn { line, column } = s.start(); + LineColumn { line, column } + } + #[cfg(not(proc_macro_span))] + Span::Compiler(_) => LineColumn { line: 0, column: 0 }, + Span::Fallback(s) => { + let fallback::LineColumn { line, column } = s.start(); + LineColumn { line, column } + } + } + } + + #[cfg(any(super_unstable, feature = "span-locations"))] + pub fn end(&self) -> LineColumn { + match self { + #[cfg(proc_macro_span)] + Span::Compiler(s) => { + let proc_macro::LineColumn { line, column } = s.end(); + LineColumn { line, column } + } + #[cfg(not(proc_macro_span))] + Span::Compiler(_) => LineColumn { line: 0, column: 0 }, + Span::Fallback(s) => { + let fallback::LineColumn { line, column } = s.end(); + LineColumn { line, column } + } + } + } + + pub fn join(&self, other: Span) -> Option<Span> { + let ret = match (self, other) { + #[cfg(proc_macro_span)] + (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.join(b)?), + (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.join(b)?), + _ => return None, + }; + Some(ret) + } + + #[cfg(super_unstable)] + pub fn eq(&self, other: &Span) -> bool { + match (self, other) { + (Span::Compiler(a), Span::Compiler(b)) => a.eq(b), + (Span::Fallback(a), Span::Fallback(b)) => a.eq(b), + _ => false, + } + } + + fn unwrap_nightly(self) ->
proc_macro::Span { + match self { + Span::Compiler(s) => s, + Span::Fallback(_) => mismatch(), + } + } +} + +impl From<proc_macro::Span> for crate::Span { + fn from(proc_span: proc_macro::Span) -> crate::Span { + crate::Span::_new(Span::Compiler(proc_span)) + } +} + +impl From<fallback::Span> for Span { + fn from(inner: fallback::Span) -> Span { + Span::Fallback(inner) + } +} + +impl fmt::Debug for Span { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Span::Compiler(s) => s.fmt(f), + Span::Fallback(s) => s.fmt(f), + } + } +} + +pub fn debug_span_field_if_nontrivial(debug: &mut fmt::DebugStruct, span: Span) { + match span { + Span::Compiler(s) => { + debug.field("span", &s); + } + Span::Fallback(s) => fallback::debug_span_field_if_nontrivial(debug, s), + } +} + +#[derive(Clone)] +pub enum Group { + Compiler(proc_macro::Group), + Fallback(fallback::Group), +} + +impl Group { + pub fn new(delimiter: Delimiter, stream: TokenStream) -> Group { + match stream { + TokenStream::Compiler(tts) => { + let delimiter = match delimiter { + Delimiter::Parenthesis => proc_macro::Delimiter::Parenthesis, + Delimiter::Bracket => proc_macro::Delimiter::Bracket, + Delimiter::Brace => proc_macro::Delimiter::Brace, + Delimiter::None => proc_macro::Delimiter::None, + }; + Group::Compiler(proc_macro::Group::new(delimiter, tts.into_token_stream())) + } + TokenStream::Fallback(stream) => { + Group::Fallback(fallback::Group::new(delimiter, stream)) + } + } + } + + pub fn delimiter(&self) -> Delimiter { + match self { + Group::Compiler(g) => match g.delimiter() { + proc_macro::Delimiter::Parenthesis => Delimiter::Parenthesis, + proc_macro::Delimiter::Bracket => Delimiter::Bracket, + proc_macro::Delimiter::Brace => Delimiter::Brace, + proc_macro::Delimiter::None => Delimiter::None, + }, + Group::Fallback(g) => g.delimiter(), + } + } + + pub fn stream(&self) -> TokenStream { + match self { + Group::Compiler(g) => TokenStream::Compiler(DeferredTokenStream::new(g.stream())), + Group::Fallback(g) => TokenStream::Fallback(g.stream()), + } + } + + pub fn span(&self) -> Span { + match self { + Group::Compiler(g) => Span::Compiler(g.span()), + Group::Fallback(g) => Span::Fallback(g.span()), + } + } + + pub fn span_open(&self) -> Span { + match self { + #[cfg(proc_macro_span)] + Group::Compiler(g) => Span::Compiler(g.span_open()), + #[cfg(not(proc_macro_span))] + Group::Compiler(g) => Span::Compiler(g.span()), + Group::Fallback(g) => Span::Fallback(g.span_open()), + } + } + + pub fn span_close(&self) -> Span { + match self { + #[cfg(proc_macro_span)] + Group::Compiler(g) => Span::Compiler(g.span_close()), + #[cfg(not(proc_macro_span))] + Group::Compiler(g) => Span::Compiler(g.span()), + Group::Fallback(g) => Span::Fallback(g.span_close()), + } + } + + pub fn set_span(&mut self, span: Span) { + match (self, span) { + (Group::Compiler(g), Span::Compiler(s)) => g.set_span(s), + (Group::Fallback(g), Span::Fallback(s)) => g.set_span(s), + _ => mismatch(), + } + } + + fn unwrap_nightly(self) -> proc_macro::Group { + match self { + Group::Compiler(g) => g, + Group::Fallback(_) => mismatch(), + } + } +} + +impl From<fallback::Group> for Group { + fn from(g: fallback::Group) -> Self { + Group::Fallback(g) + } +} + +impl fmt::Display for Group { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match self { + Group::Compiler(group) => group.fmt(formatter), + Group::Fallback(group) => group.fmt(formatter), + } + } +} + +impl fmt::Debug for Group { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match self {
Group::Compiler(group) => group.fmt(formatter), + Group::Fallback(group) => group.fmt(formatter), + } + } +} + +#[derive(Clone)] +pub enum Ident { + Compiler(proc_macro::Ident), + Fallback(fallback::Ident), +} + +impl Ident { + pub fn new(string: &str, span: Span) -> Ident { + match span { + Span::Compiler(s) => Ident::Compiler(proc_macro::Ident::new(string, s)), + Span::Fallback(s) => Ident::Fallback(fallback::Ident::new(string, s)), + } + } + + pub fn new_raw(string: &str, span: Span) -> Ident { + match span { + Span::Compiler(s) => { + let p: proc_macro::TokenStream = string.parse().unwrap(); + let ident = match p.into_iter().next() { + Some(proc_macro::TokenTree::Ident(mut i)) => { + i.set_span(s); + i + } + _ => panic!(), + }; + Ident::Compiler(ident) + } + Span::Fallback(s) => Ident::Fallback(fallback::Ident::new_raw(string, s)), + } + } + + pub fn span(&self) -> Span { + match self { + Ident::Compiler(t) => Span::Compiler(t.span()), + Ident::Fallback(t) => Span::Fallback(t.span()), + } + } + + pub fn set_span(&mut self, span: Span) { + match (self, span) { + (Ident::Compiler(t), Span::Compiler(s)) => t.set_span(s), + (Ident::Fallback(t), Span::Fallback(s)) => t.set_span(s), + _ => mismatch(), + } + } + + fn unwrap_nightly(self) -> proc_macro::Ident { + match self { + Ident::Compiler(s) => s, + Ident::Fallback(_) => mismatch(), + } + } +} + +impl PartialEq for Ident { + fn eq(&self, other: &Ident) -> bool { + match (self, other) { + (Ident::Compiler(t), Ident::Compiler(o)) => t.to_string() == o.to_string(), + (Ident::Fallback(t), Ident::Fallback(o)) => t == o, + _ => mismatch(), + } + } +} + +impl<T> PartialEq<T> for Ident +where + T: ?Sized + AsRef<str>, +{ + fn eq(&self, other: &T) -> bool { + let other = other.as_ref(); + match self { + Ident::Compiler(t) => t.to_string() == other, + Ident::Fallback(t) => t == other, + } + } +} + +impl fmt::Display for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Ident::Compiler(t) => t.fmt(f), + Ident::Fallback(t) => t.fmt(f), + } + } +} + +impl fmt::Debug for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Ident::Compiler(t) => t.fmt(f), + Ident::Fallback(t) => t.fmt(f), + } + } +} + +#[derive(Clone)] +pub enum Literal { + Compiler(proc_macro::Literal), + Fallback(fallback::Literal), +} + +macro_rules! suffixed_numbers { + ($($name:ident => $kind:ident,)*) => ($( + pub fn $name(n: $kind) -> Literal { + if nightly_works() { + Literal::Compiler(proc_macro::Literal::$name(n)) + } else { + Literal::Fallback(fallback::Literal::$name(n)) + } + } + )*) +} + +macro_rules! unsuffixed_integers { + ($($name:ident => $kind:ident,)*) => ($( + pub fn $name(n: $kind) -> Literal { + if nightly_works() { + Literal::Compiler(proc_macro::Literal::$name(n)) + } else { + Literal::Fallback(fallback::Literal::$name(n)) + } + } + )*) +} + +impl Literal { + suffixed_numbers! { + u8_suffixed => u8, + u16_suffixed => u16, + u32_suffixed => u32, + u64_suffixed => u64, + u128_suffixed => u128, + usize_suffixed => usize, + i8_suffixed => i8, + i16_suffixed => i16, + i32_suffixed => i32, + i64_suffixed => i64, + i128_suffixed => i128, + isize_suffixed => isize, + + f32_suffixed => f32, + f64_suffixed => f64, + } + + unsuffixed_integers!
{ + u8_unsuffixed => u8, + u16_unsuffixed => u16, + u32_unsuffixed => u32, + u64_unsuffixed => u64, + u128_unsuffixed => u128, + usize_unsuffixed => usize, + i8_unsuffixed => i8, + i16_unsuffixed => i16, + i32_unsuffixed => i32, + i64_unsuffixed => i64, + i128_unsuffixed => i128, + isize_unsuffixed => isize, + } + + pub fn f32_unsuffixed(f: f32) -> Literal { + if nightly_works() { + Literal::Compiler(proc_macro::Literal::f32_unsuffixed(f)) + } else { + Literal::Fallback(fallback::Literal::f32_unsuffixed(f)) + } + } + + pub fn f64_unsuffixed(f: f64) -> Literal { + if nightly_works() { + Literal::Compiler(proc_macro::Literal::f64_unsuffixed(f)) + } else { + Literal::Fallback(fallback::Literal::f64_unsuffixed(f)) + } + } + + pub fn string(t: &str) -> Literal { + if nightly_works() { + Literal::Compiler(proc_macro::Literal::string(t)) + } else { + Literal::Fallback(fallback::Literal::string(t)) + } + } + + pub fn character(t: char) -> Literal { + if nightly_works() { + Literal::Compiler(proc_macro::Literal::character(t)) + } else { + Literal::Fallback(fallback::Literal::character(t)) + } + } + + pub fn byte_string(bytes: &[u8]) -> Literal { + if nightly_works() { + Literal::Compiler(proc_macro::Literal::byte_string(bytes)) + } else { + Literal::Fallback(fallback::Literal::byte_string(bytes)) + } + } + + pub fn span(&self) -> Span { + match self { + Literal::Compiler(lit) => Span::Compiler(lit.span()), + Literal::Fallback(lit) => Span::Fallback(lit.span()), + } + } + + pub fn set_span(&mut self, span: Span) { + match (self, span) { + (Literal::Compiler(lit), Span::Compiler(s)) => lit.set_span(s), + (Literal::Fallback(lit), Span::Fallback(s)) => lit.set_span(s), + _ => mismatch(), + } + } + + pub fn subspan<R: RangeBounds<usize>>(&self, range: R) -> Option<Span> { + match self { + #[cfg(proc_macro_span)] + Literal::Compiler(lit) => lit.subspan(range).map(Span::Compiler), + #[cfg(not(proc_macro_span))] + Literal::Compiler(_lit) => None, + Literal::Fallback(lit) => lit.subspan(range).map(Span::Fallback), + } + } + + fn unwrap_nightly(self) -> proc_macro::Literal { + match self { + Literal::Compiler(s) => s, + Literal::Fallback(_) => mismatch(), + } + } +} + +impl From<fallback::Literal> for Literal { + fn from(s: fallback::Literal) -> Literal { + Literal::Fallback(s) + } +} + +impl fmt::Display for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Literal::Compiler(t) => t.fmt(f), + Literal::Fallback(t) => t.fmt(f), + } + } +} + +impl fmt::Debug for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Literal::Compiler(t) => t.fmt(f), + Literal::Fallback(t) => t.fmt(f), + } + } +} --- /dev/null +++ b/vendor/proc-macro2/tests/features.rs @@ -0,0 +1,8 @@ +#[test] +#[ignore] +fn make_sure_no_proc_macro() { + assert!( + !cfg!(feature = "proc-macro"), + "still compiled with proc_macro?" + ); +} --- /dev/null +++ b/vendor/proc-macro2/tests/marker.rs @@ -0,0 +1,59 @@ +use proc_macro2::*; + +macro_rules! assert_impl { + ($ty:ident is $($marker:ident) and +) => { + #[test] + #[allow(non_snake_case)] + fn $ty() { + fn assert_implemented<T: $($marker +)+>() {} + assert_implemented::<$ty>(); + } + }; + + ($ty:ident is not $($marker:ident) or +) => { + #[test] + #[allow(non_snake_case)] + fn $ty() { + $( + { + // Implemented for types that implement $marker. + trait IsNotImplemented { + fn assert_not_implemented() {} + } + impl<T: $marker> IsNotImplemented for T {} + + // Implemented for the type being tested.
+ trait IsImplemented { + fn assert_not_implemented() {} + } + impl IsImplemented for $ty {} + + // If $ty does not implement $marker, there is no ambiguity + // in the following trait method call. + <$ty>::assert_not_implemented(); + } + )+ + } + }; +} + +assert_impl!(Delimiter is Send and Sync); +assert_impl!(Spacing is Send and Sync); + +assert_impl!(Group is not Send or Sync); +assert_impl!(Ident is not Send or Sync); +assert_impl!(LexError is not Send or Sync); +assert_impl!(Literal is not Send or Sync); +assert_impl!(Punct is not Send or Sync); +assert_impl!(Span is not Send or Sync); +assert_impl!(TokenStream is not Send or Sync); +assert_impl!(TokenTree is not Send or Sync); + +#[cfg(procmacro2_semver_exempt)] +mod semver_exempt { + use super::*; + + assert_impl!(LineColumn is Send and Sync); + + assert_impl!(SourceFile is not Send or Sync); +} --- a/vendor/proc-macro2/tests/test.rs +++ b/vendor/proc-macro2/tests/test.rs @@ -1,11 +1,9 @@ -extern crate proc_macro2; - use std::str::{self, FromStr}; use proc_macro2::{Ident, Literal, Spacing, Span, TokenStream, TokenTree}; #[test] -fn terms() { +fn idents() { assert_eq!( Ident::new("String", Span::call_site()).to_string(), "String" @@ -16,7 +14,7 @@ fn terms() { #[test] #[cfg(procmacro2_semver_exempt)] -fn raw_terms() { +fn raw_idents() { assert_eq!( Ident::new_raw("String", Span::call_site()).to_string(), "r#String" @@ -27,37 +25,37 @@ fn raw_terms() { #[test] #[should_panic(expected = "Ident is not allowed to be empty; use Option<Ident>")] -fn term_empty() { +fn ident_empty() { Ident::new("", Span::call_site()); } #[test] #[should_panic(expected = "Ident cannot be a number; use Literal instead")] -fn term_number() { +fn ident_number() { Ident::new("255", Span::call_site()); } #[test] #[should_panic(expected = "\"a#\" is not a valid Ident")] -fn term_invalid() { +fn ident_invalid() { Ident::new("a#", Span::call_site()); } #[test] #[should_panic(expected = "not a valid Ident")] -fn raw_term_empty() { +fn raw_ident_empty() { Ident::new("r#", Span::call_site()); } #[test] #[should_panic(expected = "not a valid Ident")] -fn raw_term_number() { +fn raw_ident_number() { Ident::new("r#255", Span::call_site()); } #[test] #[should_panic(expected = "\"r#a#\" is not a valid Ident")] -fn raw_term_invalid() { +fn raw_ident_invalid() { Ident::new("r#a#", Span::call_site()); } @@ -80,13 +78,41 @@ fn lifetime_invalid() { } #[test] -fn literals() { +fn literal_string() { assert_eq!(Literal::string("foo").to_string(), "\"foo\""); assert_eq!(Literal::string("\"").to_string(), "\"\\\"\""); + assert_eq!(Literal::string("didn't").to_string(), "\"didn't\""); +} + +#[test] +fn literal_character() { + assert_eq!(Literal::character('x').to_string(), "'x'"); + assert_eq!(Literal::character('\'').to_string(), "'\\''"); + assert_eq!(Literal::character('"').to_string(), "'\"'"); +} + +#[test] +fn literal_float() { assert_eq!(Literal::f32_unsuffixed(10.0).to_string(), "10.0"); } #[test] +fn literal_suffix() { + fn token_count(p: &str) -> usize { + p.parse::<TokenStream>().unwrap().into_iter().count() + } + + assert_eq!(token_count("999u256"), 1); + assert_eq!(token_count("999r#u256"), 3); + assert_eq!(token_count("1."), 1); + assert_eq!(token_count("1.f32"), 3); + assert_eq!(token_count("1.0_0"), 1); + assert_eq!(token_count("1._0"), 3); + assert_eq!(token_count("1._m"), 3); + assert_eq!(token_count("\"\"s"), 1); +} + +#[test] fn roundtrip() { fn roundtrip(p: &str) { println!("parse: {}", p); @@ -113,6 +139,9 @@ fn roundtrip() { 9 0 0xffffffffffffffffffffffffffffffff + 1x + 1u80 +
1f320 ", ); roundtrip("'a"); @@ -129,15 +158,12 @@ fn fail() { panic!("should have failed to parse: {}\n{:#?}", p, s); } } - fail("1x"); - fail("1u80"); - fail("1f320"); fail("' static"); fail("r#1"); fail("r#_"); } -#[cfg(procmacro2_semver_exempt)] +#[cfg(span_locations)] #[test] fn span_test() { use proc_macro2::TokenTree; @@ -193,7 +219,7 @@ testing 123 } #[cfg(procmacro2_semver_exempt)] -#[cfg(not(feature = "nightly"))] +#[cfg(not(nightly))] #[test] fn default_span() { let start = Span::call_site().start(); @@ -203,7 +229,7 @@ fn default_span() { assert_eq!(end.line, 1); assert_eq!(end.column, 0); let source_file = Span::call_site().source_file(); - assert_eq!(source_file.path().to_string(), "<unspecified>"); + assert_eq!(source_file.path().to_string_lossy(), "<unspecified>"); assert!(!source_file.is_real()); } @@ -329,7 +355,6 @@ fn test_debug_ident() { } #[test] -#[cfg(not(feature = "nightly"))] fn test_debug_tokenstream() { let tts = TokenStream::from_str("[a + 1]").unwrap(); @@ -340,6 +365,27 @@ TokenStream [ delimiter: Bracket, stream: TokenStream [ Ident { + sym: a, + }, + Punct { + op: '+', + spacing: Alone, + }, + Literal { + lit: 1, + }, + ], + }, +]\ + "; + + #[cfg(not(procmacro2_semver_exempt))] + let expected_before_trailing_commas = "\ +TokenStream [ + Group { + delimiter: Bracket, + stream: TokenStream [ + Ident { + sym: a }, Punct { @@ -362,6 +408,31 @@ TokenStream [ stream: TokenStream [ Ident { sym: a, + span: bytes(2..3), + }, + Punct { + op: '+', + spacing: Alone, + span: bytes(4..5), + }, + Literal { + lit: 1, + span: bytes(6..7), + }, + ], + span: bytes(1..8), + }, +]\ + "; + + #[cfg(procmacro2_semver_exempt)] + let expected_before_trailing_commas = "\ +TokenStream [ + Group { + delimiter: Bracket, + stream: TokenStream [ + Ident { + sym: a, span: bytes(2..3) }, Punct { @@ -379,7 +450,12 @@ TokenStream [ ]\ "; - assert_eq!(expected, format!("{:#?}", tts)); + let actual = format!("{:#?}", tts); + if actual.ends_with(",\n]") { + assert_eq!(expected, actual); + } else { + assert_eq!(expected_before_trailing_commas, actual); + } } #[test] --- /dev/null +++ b/vendor/quote-0.6.8/Cargo.toml @@ -0,0 +1,32 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "quote" +version = "0.6.8" +authors = ["David Tolnay <dtolnay@gmail.com>"] +include = ["Cargo.toml", "src/**/*.rs", "tests/**/*.rs", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +description = "Quasi-quoting macro quote!(...)" +documentation = "https://docs.rs/quote/" +readme = "README.md" +keywords = ["syn"] +license = "MIT/Apache-2.0" +repository = "https://github.com/dtolnay/quote" +[dependencies.proc-macro2] +version = "0.4.13" +default-features = false + +[features] +default = ["proc-macro"] +proc-macro = ["proc-macro2/proc-macro"] +[badges.travis-ci] +repository = "dtolnay/quote" --- /dev/null +++ b/vendor/quote-0.6.8/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
--- /dev/null +++ b/vendor/quote-0.6.8/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. --- /dev/null +++ b/vendor/quote-0.6.8/README.md @@ -0,0 +1,147 @@ +Rust Quasi-Quoting +================== + +[![Build Status](https://api.travis-ci.org/dtolnay/quote.svg?branch=master)](https://travis-ci.org/dtolnay/quote) +[![Latest Version](https://img.shields.io/crates/v/quote.svg)](https://crates.io/crates/quote) +[![Rust Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/quote/) + +This crate provides the [`quote!`] macro for turning Rust syntax tree data +structures into tokens of source code. + +[`quote!`]: https://docs.rs/quote/0.6/quote/macro.quote.html + +Procedural macros in Rust receive a stream of tokens as input, execute arbitrary +Rust code to determine how to manipulate those tokens, and produce a stream of +tokens to hand back to the compiler to compile into the caller's crate. +Quasi-quoting is a solution to one piece of that -- producing tokens to return +to the compiler. + +The idea of quasi-quoting is that we write *code* that we treat as *data*. +Within the `quote!` macro, we can write what looks like code to our text editor +or IDE. We get all the benefits of the editor's brace matching, syntax +highlighting, indentation, and maybe autocompletion. But rather than compiling +that as code into the current crate, we can treat it as data, pass it around, +mutate it, and eventually hand it back to the compiler as tokens to compile into +the macro caller's crate. + +This crate is motivated by the procedural macro use case, but is a +general-purpose Rust quasi-quoting library and is not specific to procedural +macros. + +*Version requirement: Quote supports any compiler version back to Rust's very +first support for procedural macros in Rust 1.15.0.* + +[*Release notes*](https://github.com/dtolnay/quote/releases) + +```toml +[dependencies] +quote = "0.6" +``` + +```rust +#[macro_use] +extern crate quote; +``` + +## Syntax + +The quote crate provides a [`quote!`] macro within which you can write Rust code +that gets packaged into a [`TokenStream`] and can be treated as data. You should +think of `TokenStream` as representing a fragment of Rust source code. This type +can be returned directly back to the compiler by a procedural macro to get +compiled into the caller's crate. 
+ +[`TokenStream`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.TokenStream.html + +Within the `quote!` macro, interpolation is done with `#var`. Any type +implementing the [`quote::ToTokens`] trait can be interpolated. This includes +most Rust primitive types as well as most of the syntax tree types from [`syn`]. + +[`quote::ToTokens`]: https://docs.rs/quote/0.6/quote/trait.ToTokens.html +[`syn`]: https://github.com/dtolnay/syn + +```rust +let tokens = quote! { + struct SerializeWith #generics #where_clause { + value: &'a #field_ty, + phantom: ::std::marker::PhantomData<#item_ty>, + } + + impl #generics serde::Serialize for SerializeWith #generics #where_clause { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: serde::Serializer, + { + #path(self.value, serializer) + } + } + + SerializeWith { + value: #value, + phantom: ::std::marker::PhantomData::<#item_ty>, + } +}; +``` + +## Repetition + +Repetition is done using `#(...)*` or `#(...),*` similar to `macro_rules!`. This +iterates through the elements of any variable interpolated within the repetition +and inserts a copy of the repetition body for each one. The variables in an +interpolation may be anything that implements `IntoIterator`, including `Vec` or +a pre-existing iterator. + +- `#(#var)*` — no separators +- `#(#var),*` — the character before the asterisk is used as a separator +- `#( struct #var; )*` — the repetition can contain other things +- `#( #k => println!("{}", #v), )*` — even multiple interpolations + +Note that there is a difference between `#(#var ,)*` and `#(#var),*`—the latter +does not produce a trailing comma. This matches the behavior of delimiters in +`macro_rules!`. + +## Hygiene + +Any interpolated tokens preserve the `Span` information provided by their +`ToTokens` implementation. Tokens that originate within a `quote!` invocation +are spanned with [`Span::call_site()`]. + +[`Span::call_site()`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html#method.call_site + +A different span can be provided explicitly through the [`quote_spanned!`] +macro. + +[`quote_spanned!`]: https://docs.rs/quote/0.6/quote/macro.quote_spanned.html + +### Limitations + +- A non-repeating variable may not be interpolated inside of a repeating block + ([#7]). +- The same variable may not be interpolated more than once inside of a repeating + block ([#8]). + +[#7]: https://github.com/dtolnay/quote/issues/7 +[#8]: https://github.com/dtolnay/quote/issues/8 + +### Recursion limit + +The `quote!` macro relies on deep recursion so some large invocations may fail +with "recursion limit reached" when you compile. If it fails, bump up the +recursion limit by adding `#![recursion_limit = "128"]` to your crate. An even +higher limit may be necessary for especially large invocations. You don't need +this unless the compiler tells you that you need it. + +## License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in this crate by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions.
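To make the `#(...),*` repetition form described in the README above concrete, here is a minimal, self-contained sketch against quote 0.6 and proc-macro2 0.4; the `Point3` struct and its field names are invented for illustration and are not part of the vendored crate:

```rust
extern crate proc_macro2;
#[macro_use]
extern crate quote;

use proc_macro2::{Ident, Span};

fn main() {
    // Anything that implements IntoIterator can drive a repetition;
    // Ident implements ToTokens, so each element interpolates directly.
    let fields: Vec<Ident> = ["x", "y", "z"]
        .iter()
        .map(|name| Ident::new(name, Span::call_site()))
        .collect();

    // #(...),* expands the body once per element, comma-separated,
    // with no trailing comma.
    let tokens = quote! {
        struct Point3 { #( #fields: f64 ),* }
    };

    // Renders something like `struct Point3 { x : f64 , y : f64 , z : f64 }`
    // (exact whitespace in the rendering is not guaranteed).
    println!("{}", tokens);
}
```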
--- /dev/null +++ b/vendor/quote-0.6.8/src/ext.rs @@ -0,0 +1,115 @@ +use super::ToTokens; + +use std::iter; + +use proc_macro2::{TokenStream, TokenTree}; + +/// TokenStream extension trait with methods for appending tokens. +/// +/// This trait is sealed and cannot be implemented outside of the `quote` crate. +pub trait TokenStreamExt: private::Sealed { + fn append<U>(&mut self, token: U) + where + U: Into<TokenTree>; + + fn append_all<T, I>(&mut self, iter: I) + where + T: ToTokens, + I: IntoIterator<Item = T>; + + fn append_separated<T, I, U>(&mut self, iter: I, op: U) + where + T: ToTokens, + I: IntoIterator<Item = T>, + U: ToTokens; + + fn append_terminated<T, I, U>(&mut self, iter: I, term: U) + where + T: ToTokens, + I: IntoIterator<Item = T>, + U: ToTokens; +} + +impl TokenStreamExt for TokenStream { + /// For use by `ToTokens` implementations. + /// + /// Appends the token specified to this list of tokens. + fn append<U>(&mut self, token: U) + where + U: Into<TokenTree>, + { + self.extend(iter::once(token.into())); + } + + /// For use by `ToTokens` implementations. + /// + /// ``` + /// # #[macro_use] extern crate quote; + /// # extern crate proc_macro2; + /// # use quote::{TokenStreamExt, ToTokens}; + /// # use proc_macro2::TokenStream; + /// # fn main() { + /// struct X; + /// + /// impl ToTokens for X { + /// fn to_tokens(&self, tokens: &mut TokenStream) { + /// tokens.append_all(&[true, false]); + /// } + /// } + /// + /// let tokens = quote!(#X); + /// assert_eq!(tokens.to_string(), "true false"); + /// # } + /// ``` + fn append_all<T, I>(&mut self, iter: I) + where + T: ToTokens, + I: IntoIterator<Item = T>, + { + for token in iter { + token.to_tokens(self); + } + } + + /// For use by `ToTokens` implementations. + /// + /// Appends all of the items in the iterator `I`, separated by the tokens + /// `U`. + fn append_separated<T, I, U>(&mut self, iter: I, op: U) + where + T: ToTokens, + I: IntoIterator<Item = T>, + U: ToTokens, + { + for (i, token) in iter.into_iter().enumerate() { + if i > 0 { + op.to_tokens(self); + } + token.to_tokens(self); + } + } + + /// For use by `ToTokens` implementations. + /// + /// Appends all tokens in the iterator `I`, appending `U` after each + /// element, including after the last element of the iterator. + fn append_terminated<T, I, U>(&mut self, iter: I, term: U) + where + T: ToTokens, + I: IntoIterator<Item = T>, + U: ToTokens, + { + for token in iter { + token.to_tokens(self); + term.to_tokens(self); + } + } +} + +mod private { + use proc_macro2::TokenStream; + + pub trait Sealed {} + + impl Sealed for TokenStream {} +} --- /dev/null +++ b/vendor/quote-0.6.8/src/lib.rs @@ -0,0 +1,858 @@ +//! This crate provides the [`quote!`] macro for turning Rust syntax tree data +//! structures into tokens of source code. +//! +//! [`quote!`]: macro.quote.html +//! +//! Procedural macros in Rust receive a stream of tokens as input, execute +//! arbitrary Rust code to determine how to manipulate those tokens, and produce +//! a stream of tokens to hand back to the compiler to compile into the caller's +//! crate. Quasi-quoting is a solution to one piece of that -- producing tokens +//! to return to the compiler. +//! +//! The idea of quasi-quoting is that we write *code* that we treat as *data*. +//! Within the `quote!` macro, we can write what looks like code to our text +//! editor or IDE. We get all the benefits of the editor's brace matching, +//! syntax highlighting, indentation, and maybe autocompletion. But rather than +//! compiling that as code into the current crate, we can treat it as data, pass
it around, mutate it, and eventually hand it back to the compiler as tokens +//! to compile into the macro caller's crate. +//! +//! This crate is motivated by the procedural macro use case, but is a +//! general-purpose Rust quasi-quoting library and is not specific to procedural +//! macros. +//! +//! *Version requirement: Quote supports any compiler version back to Rust's +//! very first support for procedural macros in Rust 1.15.0.* +//! +//! ```toml +//! [dependencies] +//! quote = "0.6" +//! ``` +//! +//! ``` +//! #[macro_use] +//! extern crate quote; +//! # +//! # fn main() {} +//! ``` +//! +//! # Example +//! +//! The following quasi-quoted block of code is something you might find in [a] +//! procedural macro having to do with data structure serialization. The `#var` +//! syntax performs interpolation of runtime variables into the quoted tokens. +//! Check out the documentation of the [`quote!`] macro for more detail about +//! the syntax. See also the [`quote_spanned!`] macro which is important for +//! implementing hygienic procedural macros. +//! +//! [a]: https://serde.rs/ +//! [`quote_spanned!`]: macro.quote_spanned.html +//! +//! ``` +//! # #[macro_use] +//! # extern crate quote; +//! # +//! # fn main() { +//! # let generics = ""; +//! # let where_clause = ""; +//! # let field_ty = ""; +//! # let item_ty = ""; +//! # let path = ""; +//! # let value = ""; +//! # +//! let tokens = quote! { +//! struct SerializeWith #generics #where_clause { +//! value: &'a #field_ty, +//! phantom: ::std::marker::PhantomData<#item_ty>, +//! } +//! +//! impl #generics serde::Serialize for SerializeWith #generics #where_clause { +//! fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> +//! where +//! S: serde::Serializer, +//! { +//! #path(self.value, serializer) +//! } +//! } +//! +//! SerializeWith { +//! value: #value, +//! phantom: ::std::marker::PhantomData::<#item_ty>, +//! } +//! }; +//! # +//! # } +//! ``` +//! +//! ## Recursion limit +//! +//! The `quote!` macro relies on deep recursion so some large invocations may +//! fail with "recursion limit reached" when you compile. If it fails, bump up +//! the recursion limit by adding `#![recursion_limit = "128"]` to your crate. +//! An even higher limit may be necessary for especially large invocations. + +// Quote types in rustdoc of other crates get linked to here. +#![doc(html_root_url = "https://docs.rs/quote/0.6.8")] + +#[cfg(all( + not(all(target_arch = "wasm32", target_os = "unknown")), + feature = "proc-macro" +))] +extern crate proc_macro; +extern crate proc_macro2; + +mod ext; +pub use ext::TokenStreamExt; + +mod to_tokens; +pub use to_tokens::ToTokens; + +// Not public API. +#[doc(hidden)] +pub mod __rt { + use ext::TokenStreamExt; + pub use proc_macro2::*; + + fn is_ident_start(c: u8) -> bool { + (b'a' <= c && c <= b'z') || (b'A' <= c && c <= b'Z') || c == b'_' + } + + fn is_ident_continue(c: u8) -> bool { + (b'a' <= c && c <= b'z') + || (b'A' <= c && c <= b'Z') + || c == b'_' + || (b'0' <= c && c <= b'9') + } + + fn is_ident(token: &str) -> bool { + if token.bytes().all(|digit| digit >= b'0' && digit <= b'9') { + return false; + } + + let mut bytes = token.bytes(); + let first = bytes.next().unwrap(); + if !is_ident_start(first) { + return false; + } + for ch in bytes { + if !is_ident_continue(ch) { + return false; + } + } + true + } + + pub fn parse(tokens: &mut TokenStream, span: Span, s: &str) { + if is_ident(s) { + // Fast path, since idents are the most common token.
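+ // Building the Ident directly avoids round-tripping `s` through a
+ // full TokenStream parse.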
+ tokens.append(Ident::new(s, span)); + } else { + let s: TokenStream = s.parse().expect("invalid token stream"); + tokens.extend(s.into_iter().map(|mut t| { + t.set_span(span); + t + })); + } + } + + macro_rules! push_punct { + ($name:ident $char1:tt) => { + pub fn $name(tokens: &mut TokenStream, span: Span) { + let mut punct = Punct::new($char1, Spacing::Alone); + punct.set_span(span); + tokens.append(punct); + } + }; + ($name:ident $char1:tt $char2:tt) => { + pub fn $name(tokens: &mut TokenStream, span: Span) { + let mut punct = Punct::new($char1, Spacing::Joint); + punct.set_span(span); + tokens.append(punct); + let mut punct = Punct::new($char2, Spacing::Alone); + punct.set_span(span); + tokens.append(punct); + } + }; + ($name:ident $char1:tt $char2:tt $char3:tt) => { + pub fn $name(tokens: &mut TokenStream, span: Span) { + let mut punct = Punct::new($char1, Spacing::Joint); + punct.set_span(span); + tokens.append(punct); + let mut punct = Punct::new($char2, Spacing::Joint); + punct.set_span(span); + tokens.append(punct); + let mut punct = Punct::new($char3, Spacing::Alone); + punct.set_span(span); + tokens.append(punct); + } + }; + } + + push_punct!(push_add '+'); + push_punct!(push_add_eq '+' '='); + push_punct!(push_and '&'); + push_punct!(push_and_and '&' '&'); + push_punct!(push_and_eq '&' '='); + push_punct!(push_at '@'); + push_punct!(push_bang '!'); + push_punct!(push_caret '^'); + push_punct!(push_caret_eq '^' '='); + push_punct!(push_colon ':'); + push_punct!(push_colon2 ':' ':'); + push_punct!(push_comma ','); + push_punct!(push_div '/'); + push_punct!(push_div_eq '/' '='); + push_punct!(push_dot '.'); + push_punct!(push_dot2 '.' '.'); + push_punct!(push_dot3 '.' '.' '.'); + push_punct!(push_dot_dot_eq '.' '.' '='); + push_punct!(push_eq '='); + push_punct!(push_eq_eq '=' '='); + push_punct!(push_ge '>' '='); + push_punct!(push_gt '>'); + push_punct!(push_le '<' '='); + push_punct!(push_lt '<'); + push_punct!(push_mul_eq '*' '='); + push_punct!(push_ne '!' '='); + push_punct!(push_or '|'); + push_punct!(push_or_eq '|' '='); + push_punct!(push_or_or '|' '|'); + push_punct!(push_pound '#'); + push_punct!(push_question '?'); + push_punct!(push_rarrow '-' '>'); + push_punct!(push_larrow '<' '-'); + push_punct!(push_rem '%'); + push_punct!(push_rem_eq '%' '='); + push_punct!(push_fat_arrow '=' '>'); + push_punct!(push_semi ';'); + push_punct!(push_shl '<' '<'); + push_punct!(push_shl_eq '<' '<' '='); + push_punct!(push_shr '>' '>'); + push_punct!(push_shr_eq '>' '>' '='); + push_punct!(push_star '*'); + push_punct!(push_sub '-'); + push_punct!(push_sub_eq '-' '='); +} + +/// The whole point. +/// +/// Performs variable interpolation against the input and produces it as +/// [`TokenStream`]. For returning tokens to the compiler in a procedural macro, use +/// `into()` to build a `TokenStream`. +/// +/// [`TokenStream`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.TokenStream.html +/// +/// # Interpolation +/// +/// Variable interpolation is done with `#var` (similar to `$var` in +/// `macro_rules!` macros). This grabs the `var` variable that is currently in +/// scope and inserts it in that location in the output tokens. Any type +/// implementing the [`ToTokens`] trait can be interpolated. This includes most +/// Rust primitive types as well as most of the syntax tree types from the [Syn] +/// crate. 
+/// +/// [`ToTokens`]: trait.ToTokens.html +/// [Syn]: https://github.com/dtolnay/syn +/// +/// Repetition is done using `#(...)*` or `#(...),*` again similar to +/// `macro_rules!`. This iterates through the elements of any variable +/// interpolated within the repetition and inserts a copy of the repetition body +/// for each one. The variables in an interpolation may be anything that +/// implements `IntoIterator`, including `Vec` or a pre-existing iterator. +/// +/// - `#(#var)*` — no separators +/// - `#(#var),*` — the character before the asterisk is used as a separator +/// - `#( struct #var; )*` — the repetition can contain other tokens +/// - `#( #k => println!("{}", #v), )*` — even multiple interpolations +/// +/// # Hygiene +/// +/// Any interpolated tokens preserve the `Span` information provided by their +/// `ToTokens` implementation. Tokens that originate within the `quote!` +/// invocation are spanned with [`Span::call_site()`]. +/// +/// [`Span::call_site()`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html#method.call_site +/// +/// A different span can be provided through the [`quote_spanned!`] macro. +/// +/// [`quote_spanned!`]: macro.quote_spanned.html +/// +/// # Example +/// +/// ``` +/// # #[cfg(any())] +/// extern crate proc_macro; +/// # extern crate proc_macro2 as proc_macro; +/// +/// #[macro_use] +/// extern crate quote; +/// +/// use proc_macro::TokenStream; +/// +/// # const IGNORE_TOKENS: &'static str = stringify! { +/// #[proc_macro_derive(HeapSize)] +/// # }; +/// pub fn derive_heap_size(input: TokenStream) -> TokenStream { +/// // Parse the input and figure out what implementation to generate... +/// # const IGNORE_TOKENS: &'static str = stringify! { +/// let name = /* ... */; +/// let expr = /* ... */; +/// # }; +/// # +/// # let name = 0; +/// # let expr = 0; +/// +/// let expanded = quote! { +/// // The generated impl. +/// impl ::heapsize::HeapSize for #name { +/// fn heap_size_of_children(&self) -> usize { +/// #expr +/// } +/// } +/// }; +/// +/// // Hand the output tokens back to the compiler. +/// expanded.into() +/// } +/// # +/// # fn main() {} +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! quote { + ($($tt:tt)*) => (quote_spanned!($crate::__rt::Span::call_site()=> $($tt)*)); +} + +/// Same as `quote!`, but applies a given span to all tokens originating within +/// the macro invocation. +/// +/// # Syntax +/// +/// A span expression of type [`Span`], followed by `=>`, followed by the tokens +/// to quote. The span expression should be brief -- use a variable for anything +/// more than a few characters. There should be no space before the `=>` token. +/// +/// [`Span`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html +/// +/// ``` +/// # #[macro_use] +/// # extern crate quote; +/// # extern crate proc_macro2; +/// # +/// # use proc_macro2::Span; +/// # +/// # fn main() { +/// # const IGNORE_TOKENS: &'static str = stringify! { +/// let span = /* ... */; +/// # }; +/// # let span = Span::call_site(); +/// # let init = 0; +/// +/// // On one line, use parentheses. +/// let tokens = quote_spanned!(span=> Box::into_raw(Box::new(#init))); +/// +/// // On multiple lines, place the span at the top and use braces. +/// let tokens = quote_spanned! {span=> +/// Box::into_raw(Box::new(#init)) +/// }; +/// # } +/// ``` +/// +/// The lack of space before the `=>` should look jarring to Rust programmers +/// and this is intentional. 
The formatting is designed to be visibly
+/// off-balance and draw the eye a particular way, due to the span expression
+/// being evaluated in the context of the procedural macro and the remaining
+/// tokens being evaluated in the generated code.
+///
+/// # Hygiene
+///
+/// Any interpolated tokens preserve the `Span` information provided by their
+/// `ToTokens` implementation. Tokens that originate within the `quote_spanned!`
+/// invocation are spanned with the given span argument.
+///
+/// # Example
+///
+/// The following procedural macro code uses `quote_spanned!` to assert that a
+/// particular Rust type implements the [`Sync`] trait so that references can be
+/// safely shared between threads.
+///
+/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
+///
+/// ```
+/// # #[macro_use]
+/// # extern crate quote;
+/// # extern crate proc_macro2;
+/// #
+/// # use quote::{TokenStreamExt, ToTokens};
+/// # use proc_macro2::{Span, TokenStream};
+/// #
+/// # struct Type;
+/// #
+/// # impl Type {
+/// #     fn span(&self) -> Span {
+/// #         Span::call_site()
+/// #     }
+/// # }
+/// #
+/// # impl ToTokens for Type {
+/// #     fn to_tokens(&self, _tokens: &mut TokenStream) {}
+/// # }
+/// #
+/// # fn main() {
+/// # let ty = Type;
+/// # let call_site = Span::call_site();
+/// #
+/// let ty_span = ty.span();
+/// let assert_sync = quote_spanned! {ty_span=>
+///     struct _AssertSync where #ty: Sync;
+/// };
+/// # }
+/// ```
+///
+/// If the assertion fails, the user will see an error like the following. The
+/// input span of their type is highlighted in the error.
+///
+/// ```text
+/// error[E0277]: the trait bound `*const (): std::marker::Sync` is not satisfied
+///   --> src/main.rs:10:21
+///    |
+/// 10 |     static ref PTR: *const () = &();
+///    |                     ^^^^^^^^^ `*const ()` cannot be shared between threads safely
+/// ```
+///
+/// In this example it is important for the where-clause to be spanned with the
+/// line/column information of the user's input type so that error messages are
+/// placed appropriately by the compiler. But it is also incredibly important
+/// that `Sync` resolves at the macro definition site and not the macro call
+/// site. If we resolve `Sync` at the same span that the user's type is going to
+/// be resolved, then they could bypass our check by defining their own trait
+/// named `Sync` that is implemented for their type.
+#[macro_export(local_inner_macros)]
+macro_rules! quote_spanned {
+    ($span:expr=> $($tt:tt)*) => {
+        {
+            let mut _s = $crate::__rt::TokenStream::new();
+            let _span = $span;
+            quote_each_token!(_s _span $($tt)*);
+            _s
+        }
+    };
+}
+
+// Extract the names of all #metavariables and pass them to the $finish macro.
+//
+// in:   pounded_var_names!(then () a #b c #( #d )* #e)
+// out:  then!(() b d e)
+#[macro_export(local_inner_macros)]
+#[doc(hidden)]
+macro_rules!
pounded_var_names { + ($finish:ident ($($found:ident)*) # ( $($inner:tt)* ) $($rest:tt)*) => { + pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) + }; + + ($finish:ident ($($found:ident)*) # [ $($inner:tt)* ] $($rest:tt)*) => { + pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) + }; + + ($finish:ident ($($found:ident)*) # { $($inner:tt)* } $($rest:tt)*) => { + pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) + }; + + ($finish:ident ($($found:ident)*) # $first:ident $($rest:tt)*) => { + pounded_var_names!($finish ($($found)* $first) $($rest)*) + }; + + ($finish:ident ($($found:ident)*) ( $($inner:tt)* ) $($rest:tt)*) => { + pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) + }; + + ($finish:ident ($($found:ident)*) [ $($inner:tt)* ] $($rest:tt)*) => { + pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) + }; + + ($finish:ident ($($found:ident)*) { $($inner:tt)* } $($rest:tt)*) => { + pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) + }; + + ($finish:ident ($($found:ident)*) $ignore:tt $($rest:tt)*) => { + pounded_var_names!($finish ($($found)*) $($rest)*) + }; + + ($finish:ident ($($found:ident)*)) => { + $finish!(() $($found)*) + }; +} + +// in: nested_tuples_pat!(() a b c d e) +// out: ((((a b) c) d) e) +// +// in: nested_tuples_pat!(() a) +// out: a +#[macro_export(local_inner_macros)] +#[doc(hidden)] +macro_rules! nested_tuples_pat { + (()) => { + &() + }; + + (() $first:ident $($rest:ident)*) => { + nested_tuples_pat!(($first) $($rest)*) + }; + + (($pat:pat) $first:ident $($rest:ident)*) => { + nested_tuples_pat!((($pat, $first)) $($rest)*) + }; + + (($done:pat)) => { + $done + }; +} + +// in: multi_zip_expr!(() a b c d e) +// out: a.into_iter().zip(b).zip(c).zip(d).zip(e) +// +// in: multi_zip_iter!(() a) +// out: a +#[macro_export(local_inner_macros)] +#[doc(hidden)] +macro_rules! multi_zip_expr { + (()) => { + &[] + }; + + (() $single:ident) => { + $single + }; + + (() $first:ident $($rest:ident)*) => { + multi_zip_expr!(($first.into_iter()) $($rest)*) + }; + + (($zips:expr) $first:ident $($rest:ident)*) => { + multi_zip_expr!(($zips.zip($first)) $($rest)*) + }; + + (($done:expr)) => { + $done + }; +} + +#[macro_export(local_inner_macros)] +#[doc(hidden)] +macro_rules! quote_each_token { + ($tokens:ident $span:ident) => {}; + + ($tokens:ident $span:ident # ! 
$($rest:tt)*) => { + quote_each_token!($tokens $span #); + quote_each_token!($tokens $span !); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident # ( $($inner:tt)* ) * $($rest:tt)*) => { + for pounded_var_names!(nested_tuples_pat () $($inner)*) + in pounded_var_names!(multi_zip_expr () $($inner)*) { + quote_each_token!($tokens $span $($inner)*); + } + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident # ( $($inner:tt)* ) $sep:tt * $($rest:tt)*) => { + for (_i, pounded_var_names!(nested_tuples_pat () $($inner)*)) + in pounded_var_names!(multi_zip_expr () $($inner)*).into_iter().enumerate() { + if _i > 0 { + quote_each_token!($tokens $span $sep); + } + quote_each_token!($tokens $span $($inner)*); + } + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident # [ $($inner:tt)* ] $($rest:tt)*) => { + quote_each_token!($tokens $span #); + $tokens.extend({ + let mut g = $crate::__rt::Group::new( + $crate::__rt::Delimiter::Bracket, + quote_spanned!($span=> $($inner)*), + ); + g.set_span($span); + Some($crate::__rt::TokenTree::from(g)) + }); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident # $first:ident $($rest:tt)*) => { + $crate::ToTokens::to_tokens(&$first, &mut $tokens); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident ( $($first:tt)* ) $($rest:tt)*) => { + $tokens.extend({ + let mut g = $crate::__rt::Group::new( + $crate::__rt::Delimiter::Parenthesis, + quote_spanned!($span=> $($first)*), + ); + g.set_span($span); + Some($crate::__rt::TokenTree::from(g)) + }); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident [ $($first:tt)* ] $($rest:tt)*) => { + $tokens.extend({ + let mut g = $crate::__rt::Group::new( + $crate::__rt::Delimiter::Bracket, + quote_spanned!($span=> $($first)*), + ); + g.set_span($span); + Some($crate::__rt::TokenTree::from(g)) + }); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident { $($first:tt)* } $($rest:tt)*) => { + $tokens.extend({ + let mut g = $crate::__rt::Group::new( + $crate::__rt::Delimiter::Brace, + quote_spanned!($span=> $($first)*), + ); + g.set_span($span); + Some($crate::__rt::TokenTree::from(g)) + }); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident + $($rest:tt)*) => { + $crate::__rt::push_add(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident += $($rest:tt)*) => { + $crate::__rt::push_add_eq(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident & $($rest:tt)*) => { + $crate::__rt::push_and(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident && $($rest:tt)*) => { + $crate::__rt::push_and_and(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident &= $($rest:tt)*) => { + $crate::__rt::push_and_eq(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident @ $($rest:tt)*) => { + $crate::__rt::push_at(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident ! 
$($rest:tt)*) => { + $crate::__rt::push_bang(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident ^ $($rest:tt)*) => { + $crate::__rt::push_caret(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident ^= $($rest:tt)*) => { + $crate::__rt::push_caret_eq(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident : $($rest:tt)*) => { + $crate::__rt::push_colon(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident :: $($rest:tt)*) => { + $crate::__rt::push_colon2(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident , $($rest:tt)*) => { + $crate::__rt::push_comma(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident / $($rest:tt)*) => { + $crate::__rt::push_div(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident /= $($rest:tt)*) => { + $crate::__rt::push_div_eq(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident . $($rest:tt)*) => { + $crate::__rt::push_dot(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident .. $($rest:tt)*) => { + $crate::__rt::push_dot2(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident ... $($rest:tt)*) => { + $crate::__rt::push_dot3(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident ..= $($rest:tt)*) => { + $crate::__rt::push_dot_dot_eq(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident = $($rest:tt)*) => { + $crate::__rt::push_eq(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident == $($rest:tt)*) => { + $crate::__rt::push_eq_eq(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident >= $($rest:tt)*) => { + $crate::__rt::push_ge(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident > $($rest:tt)*) => { + $crate::__rt::push_gt(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident <= $($rest:tt)*) => { + $crate::__rt::push_le(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident < $($rest:tt)*) => { + $crate::__rt::push_lt(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident *= $($rest:tt)*) => { + $crate::__rt::push_mul_eq(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident != $($rest:tt)*) => { + $crate::__rt::push_ne(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident | $($rest:tt)*) => { + $crate::__rt::push_or(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident |= $($rest:tt)*) => { + $crate::__rt::push_or_eq(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident || $($rest:tt)*) => { + $crate::__rt::push_or_or(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident # $($rest:tt)*) => { + $crate::__rt::push_pound(&mut $tokens, 
$span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident ? $($rest:tt)*) => { + $crate::__rt::push_question(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident -> $($rest:tt)*) => { + $crate::__rt::push_rarrow(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident <- $($rest:tt)*) => { + $crate::__rt::push_larrow(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident % $($rest:tt)*) => { + $crate::__rt::push_rem(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident %= $($rest:tt)*) => { + $crate::__rt::push_rem_eq(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident => $($rest:tt)*) => { + $crate::__rt::push_fat_arrow(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident ; $($rest:tt)*) => { + $crate::__rt::push_semi(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident << $($rest:tt)*) => { + $crate::__rt::push_shl(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident <<= $($rest:tt)*) => { + $crate::__rt::push_shl_eq(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident >> $($rest:tt)*) => { + $crate::__rt::push_shr(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident >>= $($rest:tt)*) => { + $crate::__rt::push_shr_eq(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident * $($rest:tt)*) => { + $crate::__rt::push_star(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident - $($rest:tt)*) => { + $crate::__rt::push_sub(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident -= $($rest:tt)*) => { + $crate::__rt::push_sub_eq(&mut $tokens, $span); + quote_each_token!($tokens $span $($rest)*); + }; + + ($tokens:ident $span:ident $first:tt $($rest:tt)*) => { + $crate::__rt::parse(&mut $tokens, $span, quote_stringify!($first)); + quote_each_token!($tokens $span $($rest)*); + }; +} + +// Unhygienically invoke whatever `stringify` the caller has in scope i.e. not a +// local macro. The macros marked `local_inner_macros` above cannot invoke +// `stringify` directly. +#[macro_export] +#[doc(hidden)] +macro_rules! quote_stringify { + ($tt:tt) => { + stringify!($tt) + }; +} --- /dev/null +++ b/vendor/quote-0.6.8/src/to_tokens.rs @@ -0,0 +1,197 @@ +use super::TokenStreamExt; + +use std::borrow::Cow; +use std::iter; + +use proc_macro2::{Group, Ident, Literal, Punct, Span, TokenStream, TokenTree}; + +/// Types that can be interpolated inside a [`quote!`] invocation. +/// +/// [`quote!`]: macro.quote.html +pub trait ToTokens { + /// Write `self` to the given `TokenStream`. + /// + /// The token append methods provided by the [`TokenStreamExt`] extension + /// trait may be useful for implementing `ToTokens`. 
+    ///
+    /// [`TokenStreamExt`]: trait.TokenStreamExt.html
+    ///
+    /// # Example
+    ///
+    /// Example implementation for a struct representing Rust paths like
+    /// `std::cmp::PartialEq`:
+    ///
+    /// ```
+    /// extern crate quote;
+    /// use quote::{TokenStreamExt, ToTokens};
+    ///
+    /// extern crate proc_macro2;
+    /// use proc_macro2::{TokenTree, Spacing, Span, Punct, TokenStream};
+    ///
+    /// pub struct Path {
+    ///     pub global: bool,
+    ///     pub segments: Vec<PathSegment>,
+    /// }
+    ///
+    /// impl ToTokens for Path {
+    ///     fn to_tokens(&self, tokens: &mut TokenStream) {
+    ///         for (i, segment) in self.segments.iter().enumerate() {
+    ///             if i > 0 || self.global {
+    ///                 // Double colon `::`
+    ///                 tokens.append(Punct::new(':', Spacing::Joint));
+    ///                 tokens.append(Punct::new(':', Spacing::Alone));
+    ///             }
+    ///             segment.to_tokens(tokens);
+    ///         }
+    ///     }
+    /// }
+    /// #
+    /// # pub struct PathSegment;
+    /// #
+    /// # impl ToTokens for PathSegment {
+    /// #     fn to_tokens(&self, tokens: &mut TokenStream) {
+    /// #         unimplemented!()
+    /// #     }
+    /// # }
+    /// #
+    /// # fn main() {}
+    /// ```
+    fn to_tokens(&self, tokens: &mut TokenStream);
+
+    /// Convert `self` directly into a `TokenStream` object.
+    ///
+    /// This method is implicitly implemented using `to_tokens`, and acts as a
+    /// convenience method for consumers of the `ToTokens` trait.
+    fn into_token_stream(self) -> TokenStream
+    where
+        Self: Sized,
+    {
+        let mut tokens = TokenStream::new();
+        self.to_tokens(&mut tokens);
+        tokens
+    }
+}
+
+impl<'a, T: ?Sized + ToTokens> ToTokens for &'a T {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        (**self).to_tokens(tokens);
+    }
+}
+
+impl<'a, T: ?Sized + ToTokens> ToTokens for &'a mut T {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        (**self).to_tokens(tokens);
+    }
+}
+
+impl<'a, T: ?Sized + ToOwned + ToTokens> ToTokens for Cow<'a, T> {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        (**self).to_tokens(tokens);
+    }
+}
+
+impl<T: ?Sized + ToTokens> ToTokens for Box<T> {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        (**self).to_tokens(tokens);
+    }
+}
+
+impl<T: ToTokens> ToTokens for Option<T> {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        if let Some(ref t) = *self {
+            t.to_tokens(tokens);
+        }
+    }
+}
+
+impl ToTokens for str {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        tokens.append(Literal::string(self));
+    }
+}
+
+impl ToTokens for String {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        self.as_str().to_tokens(tokens);
+    }
+}
+
+macro_rules! primitive {
+    ($($t:ident => $name:ident)*) => ($(
+        impl ToTokens for $t {
+            fn to_tokens(&self, tokens: &mut TokenStream) {
+                tokens.append(Literal::$name(*self));
+            }
+        }
+    )*)
+}
+
+primitive!
{ + i8 => i8_suffixed + i16 => i16_suffixed + i32 => i32_suffixed + i64 => i64_suffixed + isize => isize_suffixed + + u8 => u8_suffixed + u16 => u16_suffixed + u32 => u32_suffixed + u64 => u64_suffixed + usize => usize_suffixed + + f32 => f32_suffixed + f64 => f64_suffixed +} + +impl ToTokens for char { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Literal::character(*self)); + } +} + +impl ToTokens for bool { + fn to_tokens(&self, tokens: &mut TokenStream) { + let word = if *self { "true" } else { "false" }; + tokens.append(Ident::new(word, Span::call_site())); + } +} + +impl ToTokens for Group { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(self.clone()); + } +} + +impl ToTokens for Ident { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(self.clone()); + } +} + +impl ToTokens for Punct { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(self.clone()); + } +} + +impl ToTokens for Literal { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(self.clone()); + } +} + +impl ToTokens for TokenTree { + fn to_tokens(&self, dst: &mut TokenStream) { + dst.append(self.clone()); + } +} + +impl ToTokens for TokenStream { + fn to_tokens(&self, dst: &mut TokenStream) { + dst.extend(iter::once(self.clone())); + } + + fn into_token_stream(self) -> TokenStream { + self + } +} --- /dev/null +++ b/vendor/quote-0.6.8/tests/test.rs @@ -0,0 +1,290 @@ +#![cfg_attr(feature = "cargo-clippy", allow(blacklisted_name))] + +use std::borrow::Cow; + +extern crate proc_macro2; +#[macro_use] +extern crate quote; + +use proc_macro2::{Ident, Span, TokenStream}; +use quote::TokenStreamExt; + +struct X; + +impl quote::ToTokens for X { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append(Ident::new("X", Span::call_site())); + } +} + +#[test] +fn test_quote_impl() { + let tokens = quote! { + impl<'a, T: ToTokens> ToTokens for &'a T { + fn to_tokens(&self, tokens: &mut TokenStream) { + (**self).to_tokens(tokens) + } + } + }; + + let expected = concat!( + "impl < 'a , T : ToTokens > ToTokens for & 'a T { ", + "fn to_tokens ( & self , tokens : & mut TokenStream ) { ", + "( * * self ) . to_tokens ( tokens ) ", + "} ", + "}" + ); + + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_substitution() { + let x = X; + let tokens = quote!(#x <#x> (#x) [#x] {#x}); + + let expected = "X < X > ( X ) [ X ] { X }"; + + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_iter() { + let primes = &[X, X, X, X]; + + assert_eq!("X X X X", quote!(#(#primes)*).to_string()); + + assert_eq!("X , X , X , X ,", quote!(#(#primes,)*).to_string()); + + assert_eq!("X , X , X , X", quote!(#(#primes),*).to_string()); +} + +#[test] +fn test_advanced() { + let generics = quote!( <'a, T> ); + + let where_clause = quote!( where T: Serialize ); + + let field_ty = quote!(String); + + let item_ty = quote!(Cow<'a, str>); + + let path = quote!(SomeTrait::serialize_with); + + let value = quote!(self.x); + + let tokens = quote! 
{
+        struct SerializeWith #generics #where_clause {
+            value: &'a #field_ty,
+            phantom: ::std::marker::PhantomData<#item_ty>,
+        }
+
+        impl #generics ::serde::Serialize for SerializeWith #generics #where_clause {
+            fn serialize<S>(&self, s: &mut S) -> Result<(), S::Error>
+                where S: ::serde::Serializer
+            {
+                #path(self.value, s)
+            }
+        }
+
+        SerializeWith {
+            value: #value,
+            phantom: ::std::marker::PhantomData::<#item_ty>,
+        }
+    };
+
+    let expected = concat!(
+        "struct SerializeWith < 'a , T > where T : Serialize { ",
+        "value : & 'a String , ",
+        "phantom : :: std :: marker :: PhantomData < Cow < 'a , str > > , ",
+        "} ",
+        "impl < 'a , T > :: serde :: Serialize for SerializeWith < 'a , T > where T : Serialize { ",
+        "fn serialize < S > ( & self , s : & mut S ) -> Result < ( ) , S :: Error > ",
+        "where S : :: serde :: Serializer ",
+        "{ ",
+        "SomeTrait :: serialize_with ( self . value , s ) ",
+        "} ",
+        "} ",
+        "SerializeWith { ",
+        "value : self . x , ",
+        "phantom : :: std :: marker :: PhantomData :: < Cow < 'a , str > > , ",
+        "}"
+    );
+
+    assert_eq!(expected, tokens.to_string());
+}
+
+#[test]
+fn test_integer() {
+    let ii8 = -1i8;
+    let ii16 = -1i16;
+    let ii32 = -1i32;
+    let ii64 = -1i64;
+    let iisize = -1isize;
+    let uu8 = 1u8;
+    let uu16 = 1u16;
+    let uu32 = 1u32;
+    let uu64 = 1u64;
+    let uusize = 1usize;
+
+    let tokens = quote! {
+        #ii8 #ii16 #ii32 #ii64 #iisize
+        #uu8 #uu16 #uu32 #uu64 #uusize
+    };
+    let expected = "-1i8 -1i16 -1i32 -1i64 -1isize 1u8 1u16 1u32 1u64 1usize";
+    assert_eq!(expected, tokens.to_string());
+}
+
+#[test]
+fn test_floating() {
+    let e32 = 2.345f32;
+
+    let e64 = 2.345f64;
+
+    let tokens = quote! {
+        #e32
+        #e64
+    };
+    let expected = concat!("2.345f32 2.345f64");
+    assert_eq!(expected, tokens.to_string());
+}
+
+#[test]
+fn test_char() {
+    let zero = '\0';
+    let pound = '#';
+    let quote = '"';
+    let apost = '\'';
+    let newline = '\n';
+    let heart = '\u{2764}';
+
+    let tokens = quote! {
+        #zero #pound #quote #apost #newline #heart
+    };
+    let expected = "'\\u{0}' '#' '\\\"' '\\'' '\\n' '\\u{2764}'";
+    assert_eq!(expected, tokens.to_string());
+}
+
+#[test]
+fn test_str() {
+    let s = "\0 a 'b \" c";
+    let tokens = quote!(#s);
+    let expected = "\"\\u{0} a \\'b \\\" c\"";
+    assert_eq!(expected, tokens.to_string());
+}
+
+#[test]
+fn test_string() {
+    let s = "\0 a 'b \" c".to_string();
+    let tokens = quote!(#s);
+    let expected = "\"\\u{0} a \\'b \\\" c\"";
+    assert_eq!(expected, tokens.to_string());
+}
+
+#[test]
+fn test_ident() {
+    let foo = Ident::new("Foo", Span::call_site());
+    let bar = Ident::new(&format!("Bar{}", 7), Span::call_site());
+    let tokens = quote!(struct #foo; enum #bar {});
+    let expected = "struct Foo ; enum Bar7 { }";
+    assert_eq!(expected, tokens.to_string());
+}
+
+#[test]
+fn test_duplicate() {
+    let ch = 'x';
+
+    let tokens = quote!(#ch #ch);
+
+    let expected = "'x' 'x'";
+    assert_eq!(expected, tokens.to_string());
+}
+
+#[test]
+fn test_fancy_repetition() {
+    let foo = vec!["a", "b"];
+    let bar = vec![true, false];
+
+    let tokens = quote! {
+        #(#foo: #bar),*
+    };
+
+    let expected = r#""a" : true , "b" : false"#;
+    assert_eq!(expected, tokens.to_string());
+}
+
+#[test]
+fn test_nested_fancy_repetition() {
+    let nested = vec![vec!['a', 'b', 'c'], vec!['x', 'y', 'z']];
+
+    let tokens = quote!
{
+        #(
+            #(#nested)*
+        ),*
+    };
+
+    let expected = "'a' 'b' 'c' , 'x' 'y' 'z'";
+    assert_eq!(expected, tokens.to_string());
+}
+
+#[test]
+fn test_empty_repetition() {
+    let tokens = quote!(#(a b)* #(c d),*);
+    assert_eq!("", tokens.to_string());
+}
+
+#[test]
+fn test_variable_name_conflict() {
+    // The implementation of `#(...),*` uses the variable `_i` but it should be
+    // fine, if a little confusing when debugging.
+    let _i = vec!['a', 'b'];
+    let tokens = quote! { #(#_i),* };
+    let expected = "'a' , 'b'";
+    assert_eq!(expected, tokens.to_string());
+}
+
+#[test]
+fn test_empty_quote() {
+    let tokens = quote!();
+    assert_eq!("", tokens.to_string());
+}
+
+#[test]
+fn test_box_str() {
+    let b = "str".to_owned().into_boxed_str();
+    let tokens = quote! { #b };
+    assert_eq!("\"str\"", tokens.to_string());
+}
+
+#[test]
+fn test_cow() {
+    let owned: Cow<Ident> = Cow::Owned(Ident::new("owned", Span::call_site()));
+
+    let ident = Ident::new("borrowed", Span::call_site());
+    let borrowed = Cow::Borrowed(&ident);
+
+    let tokens = quote! { #owned #borrowed };
+    assert_eq!("owned borrowed", tokens.to_string());
+}
+
+#[test]
+fn test_closure() {
+    fn field_i(i: usize) -> Ident {
+        Ident::new(&format!("__field{}", i), Span::call_site())
+    }
+
+    let fields = (0usize..3)
+        .map(field_i as fn(_) -> _)
+        .map(|var| quote! { #var });
+
+    let tokens = quote! { #(#fields)* };
+    assert_eq!("__field0 __field1 __field2", tokens.to_string());
+}
+
+#[test]
+fn test_append_tokens() {
+    let mut a = quote!(a);
+    let b = quote!(b);
+    a.append_all(b);
+    assert_eq!("a b", a.to_string());
+}
--- a/vendor/quote/.cargo-checksum.json
+++ b/vendor/quote/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"9feeb486226fb5d6c4ecc64fd3dbad083b61ec293d3e66df771052bc33b2f3d7","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"b43ef9b9c61628f8de7036271e61322cba23d878d056748e571f4f6cf9fba1b1","src/ext.rs":"a2def0b0f24c822b3f936a781c347e5f6fdc75120f85874c94f5e7eb708168c2","src/lib.rs":"f05cf3c52db6f8946323898385682455ba3816e10e417f5a737bc80767e68d7c","src/to_tokens.rs":"6c6e37057d21e4f6a7cb2d82194e981de8ed138354bbdb04e2fdeabad4a39e28","tests/test.rs":"90fe0e9a704e628339fe9298f0cb8307e94ebadfe28fffd7b2fc2d94203bc342"},"package":"dd636425967c33af890042c483632d33fa7a18f19ad1d7ea72e8998c6ef8dea5"}
\ No newline at end of file
+{"files":{"Cargo.toml":"b5c36a5bffa3623f84002fa884157ae303d2dae68d2f8a6d73ba87e82d7c56d7","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"ea5abae24fdf6d9be51c80427bd12b95d146c4660e872599910cf062d6fbab9a","src/ext.rs":"a9fed3a1a4c9d3f2de717ba808af99291b995db2cbf8067f4b6927c39cc62bc6","src/format.rs":"46bf0859e6da5ec195a409ba8bbd2029d32a30d169c30c4c8aee7020f478a8a2","src/ident_fragment.rs":"0824dca06942d8e097d220db0ace0fe3ae7cf08f0a86e9828d012c131b6590c2","src/lib.rs":"bce63d6d9822373dab6f9a1f3df419b5753625e618474c304f05ab3b38845760","src/runtime.rs":"13263adfb56e2c597c69277b3500ab35ca8a08f60ba6a66f921ffa5cdc09bde2","src/spanned.rs":"adc0ed742ad17327c375879472d435cea168c208c303f53eb93cb2c0f10f3650","src/to_tokens.rs":"e589c1643479a9003d4dd1d9fa63714042b106f1b16d8ea3903cfe2f73a020f5","tests/compiletest.rs":"0a52a44786aea1c299c695bf948b2ed2081e4cc344e5c2cadceab4eb03d0010d","tests/test.rs":"92062fb9ba4a3b74345fede8e09e1d376107f98dcd79931a794433fa2d74aeb5","tests/ui/does-not-have-iter-interpolated-dup.rs":"ad13eea21d4cdd2ab6c082f633392e1ff20fb0d1af5f2177041e0bf7f30da695","tests/ui/does-not-have-iter-interpolated.rs":"83a5b3f240651adcbe4b6e51076d76d653ad439b37442cf4054f1fd3c073f3b7","tests/ui/does-not-have-iter-separated.rs":"fe413c48331d5e3a7ae5fef6a5892a90c72f610d54595879eb49d0a94154ba3f","tests/ui/does-not-have-iter.rs":"09dc9499d861b63cebb0848b855b78e2dc9497bfde37ba6339f3625ae009a62f","tests/ui/not-quotable.rs":"5759d0884943417609f28faadc70254a3e2fd3d9bd6ff7297a3fb70a77fafd8a","tests/ui/not-repeatable.rs":"b08405e02d46712d47e48ec8d0d68c93d8ebf3bb299714a373c2c954de79f6bd","tests/ui/wrong-type-span.rs":"5f310cb7fde3ef51bad01e7f286d244e3b6e67396cd2ea7eab77275c9d902699"},"package":"053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe"}
\ No newline at end of file
--- a/vendor/quote/Cargo.toml
+++ b/vendor/quote/Cargo.toml
@@ -3,7 +3,7 @@
 # When uploading crates to the registry Cargo will automatically
 # "normalize" Cargo.toml files for maximal compatibility
 # with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g. crates.io) dependencies
+# to registry (e.g., crates.io) dependencies
 #
 # If you believe there's an error in this file please file an
 # issue against the rust-lang/cargo repository. If you're
@@ -11,19 +11,29 @@
 # will likely look very different (and much more reasonable)
 
 [package]
+edition = "2018"
 name = "quote"
-version = "0.6.8"
+version = "1.0.2"
 authors = ["David Tolnay <dtolnay@gmail.com>"]
 include = ["Cargo.toml", "src/**/*.rs", "tests/**/*.rs", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
 description = "Quasi-quoting macro quote!(...)"
 documentation = "https://docs.rs/quote/"
 readme = "README.md"
 keywords = ["syn"]
-license = "MIT/Apache-2.0"
+categories = ["development-tools::procedural-macro-helpers"]
+license = "MIT OR Apache-2.0"
 repository = "https://github.com/dtolnay/quote"
+
+[lib]
+name = "quote"
 
 [dependencies.proc-macro2]
-version = "0.4.13"
+version = "1.0"
 default-features = false
 
+[dev-dependencies.rustversion]
+version = "0.1"
+
+[dev-dependencies.trybuild]
+version = "1.0"
 
 [features]
 default = ["proc-macro"]
--- a/vendor/quote/README.md
+++ b/vendor/quote/README.md
@@ -8,13 +8,13 @@ Rust Quasi-Quoting
 This crate provides the [`quote!`] macro for turning Rust syntax tree data
 structures into tokens of source code.
-[`quote!`]: https://docs.rs/quote/0.6/quote/macro.quote.html +[`quote!`]: https://docs.rs/quote/1.0/quote/macro.quote.html Procedural macros in Rust receive a stream of tokens as input, execute arbitrary Rust code to determine how to manipulate those tokens, and produce a stream of tokens to hand back to the compiler to compile into the caller's crate. -Quasi-quoting is a solution to one piece of that -- producing tokens to return -to the compiler. +Quasi-quoting is a solution to one piece of that — producing tokens to +return to the compiler. The idea of quasi-quoting is that we write *code* that we treat as *data*. Within the `quote!` macro, we can write what looks like code to our text editor @@ -35,36 +35,29 @@ first support for procedural macros in R ```toml [dependencies] -quote = "0.6" -``` - -```rust -#[macro_use] -extern crate quote; +quote = "1.0" ``` ## Syntax The quote crate provides a [`quote!`] macro within which you can write Rust code that gets packaged into a [`TokenStream`] and can be treated as data. You should -think of `TokenStream` as representing a fragment of Rust source code. This type -can be returned directly back to the compiler by a procedural macro to get -compiled into the caller's crate. +think of `TokenStream` as representing a fragment of Rust source code. -[`TokenStream`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.TokenStream.html +[`TokenStream`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.TokenStream.html Within the `quote!` macro, interpolation is done with `#var`. Any type implementing the [`quote::ToTokens`] trait can be interpolated. This includes most Rust primitive types as well as most of the syntax tree types from [`syn`]. -[`quote::ToTokens`]: https://docs.rs/quote/0.6/quote/trait.ToTokens.html +[`quote::ToTokens`]: https://docs.rs/quote/1.0/quote/trait.ToTokens.html [`syn`]: https://github.com/dtolnay/syn ```rust let tokens = quote! { struct SerializeWith #generics #where_clause { value: &'a #field_ty, - phantom: ::std::marker::PhantomData<#item_ty>, + phantom: core::marker::PhantomData<#item_ty>, } impl #generics serde::Serialize for SerializeWith #generics #where_clause { @@ -78,7 +71,7 @@ let tokens = quote! { SerializeWith { value: #value, - phantom: ::std::marker::PhantomData::<#item_ty>, + phantom: core::marker::PhantomData::<#item_ty>, } }; ``` @@ -100,48 +93,145 @@ Note that there is a difference between does not produce a trailing comma. This matches the behavior of delimiters in `macro_rules!`. -## Hygiene +## Returning tokens to the compiler -Any interpolated tokens preserve the `Span` information provided by their -`ToTokens` implementation. Tokens that originate within a `quote!` invocation -are spanned with [`Span::call_site()`]. +The `quote!` macro evaluates to an expression of type +`proc_macro2::TokenStream`. Meanwhile Rust procedural macros are expected to +return the type `proc_macro::TokenStream`. -[`Span::call_site()`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html#method.call_site +The difference between the two types is that `proc_macro` types are entirely +specific to procedural macros and cannot ever exist in code outside of a +procedural macro, while `proc_macro2` types may exist anywhere including tests +and non-macro code like main.rs and build.rs. This is why even the procedural +macro ecosystem is largely built around `proc_macro2`, because that ensures the +libraries are unit testable and accessible in non-macro contexts. 
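+
+As a brief sketch of that boundary (the derive name `MyDerive` here is
+hypothetical), conversion typically happens only at the entry point:
+
+```rust
+use proc_macro::TokenStream;
+use quote::quote;
+
+#[proc_macro_derive(MyDerive)]
+pub fn my_derive(input: TokenStream) -> TokenStream {
+    // Do all of the real work on proc_macro2 tokens...
+    let expanded = quote! { /* ...generated items... */ };
+    // ...and convert back to proc_macro only when returning.
+    expanded.into()
+}
+```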
-A different span can be provided explicitly through the [`quote_spanned!`]
-macro.
+There is a [`From`]-conversion in both directions so returning the output of
+`quote!` from a procedural macro usually looks like `tokens.into()` or
+`proc_macro::TokenStream::from(tokens)`.
+
+[`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
+
+## Examples
+
+### Combining quoted fragments
+
+Usually you don't end up constructing an entire final `TokenStream` in one
+piece. Different parts may come from different helper functions. The tokens
+produced by `quote!` themselves implement `ToTokens` and so can be interpolated
+into later `quote!` invocations to build up a final result.
+
+```rust
+let type_definition = quote! {...};
+let methods = quote! {...};
+
+let tokens = quote! {
+    #type_definition
+    #methods
+};
+```
+
+### Constructing identifiers
+
+Suppose we have an identifier `ident` which came from somewhere in a macro
+input and we need to modify it in some way for the macro output. Let's consider
+prepending the identifier with an underscore.
+
+Simply interpolating the identifier next to an underscore will not have the
+behavior of concatenating them. The underscore and the identifier will continue
+to be two separate tokens as if you had written `_ x`.
+
+```rust
+// incorrect
+quote! {
+    let mut _#ident = 0;
+}
+```
+
+The solution is to build a new identifier token with the correct value. As this
+is such a common case, the `format_ident!` macro provides a convenient utility
+for doing so correctly.
-[`quote_spanned!`]: https://docs.rs/quote/0.6/quote/macro.quote_spanned.html
+
+```rust
+let varname = format_ident!("_{}", ident);
+quote! {
+    let mut #varname = 0;
+}
+```
+
+Alternatively, the APIs provided by Syn and proc-macro2 can be used to directly
+build the identifier. This is roughly equivalent to the above, but will not
+handle `ident` being a raw identifier.
+
+```rust
+let concatenated = format!("_{}", ident);
+let varname = syn::Ident::new(&concatenated, ident.span());
+quote! {
+    let mut #varname = 0;
+}
+```
+
+### Making method calls
-### Limitations
+
+Let's say our macro requires some type specified in the macro input to have a
+constructor called `new`. We have the type in a variable called `field_type` of
+type `syn::Type` and want to invoke the constructor.
-
-- A non-repeating variable may not be interpolated inside of a repeating block
-  ([#7]).
-- The same variable may not be interpolated more than once inside of a repeating
-  block ([#8]).
+
+```rust
+// incorrect
+quote! {
+    let value = #field_type::new();
+}
+```
+
+This works only sometimes. If `field_type` is `String`, the expanded code
+contains `String::new()` which is fine. But if `field_type` is something like
+`Vec<i32>` then the expanded code is `Vec<i32>::new()` which is invalid syntax.
+Ordinarily in handwritten Rust we would write `Vec::<i32>::new()` but for macros
+often the following is more convenient.
+
+```rust
+quote! {
+    let value = <#field_type>::new();
+}
+```
-
-[#7]: https://github.com/dtolnay/quote/issues/7
-[#8]: https://github.com/dtolnay/quote/issues/8
+
+This expands to `<Vec<i32>>::new()` which behaves correctly.
-
-### Recursion limit
+
+A similar pattern is appropriate for trait methods.
-
-The `quote!` macro relies on deep recursion so some large invocations may fail
-with "recursion limit reached" when you compile. If it fails, bump up the
-recursion limit by adding `#![recursion_limit = "128"]` to your crate. An even
-higher limit may be necessary for especially large invocations. You don't need
You don't need -this unless the compiler tells you that you need it. +```rust +quote! { + let value = <#field_type as core::default::Default>::default(); +} +``` + +## Hygiene + +Any interpolated tokens preserve the `Span` information provided by their +`ToTokens` implementation. Tokens that originate within a `quote!` invocation +are spanned with [`Span::call_site()`]. + +[`Span::call_site()`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.Span.html#method.call_site + +A different span can be provided explicitly through the [`quote_spanned!`] +macro. -## License +[`quote_spanned!`]: https://docs.rs/quote/1.0/quote/macro.quote_spanned.html -Licensed under either of +
- * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
- * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+#### License
-at your option.
+
+<sup>
+Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
+2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
+</sup>
+
-### Contribution
+ Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in this crate by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. + --- a/vendor/quote/src/ext.rs +++ b/vendor/quote/src/ext.rs @@ -8,47 +8,19 @@ use proc_macro2::{TokenStream, TokenTree /// /// This trait is sealed and cannot be implemented outside of the `quote` crate. pub trait TokenStreamExt: private::Sealed { - fn append(&mut self, token: U) - where - U: Into; - - fn append_all(&mut self, iter: I) - where - T: ToTokens, - I: IntoIterator; - - fn append_separated(&mut self, iter: I, op: U) - where - T: ToTokens, - I: IntoIterator, - U: ToTokens; - - fn append_terminated(&mut self, iter: I, term: U) - where - T: ToTokens, - I: IntoIterator, - U: ToTokens; -} - -impl TokenStreamExt for TokenStream { /// For use by `ToTokens` implementations. /// /// Appends the token specified to this list of tokens. fn append(&mut self, token: U) where - U: Into, - { - self.extend(iter::once(token.into())); - } + U: Into; /// For use by `ToTokens` implementations. /// /// ``` - /// # #[macro_use] extern crate quote; - /// # extern crate proc_macro2; - /// # use quote::{TokenStreamExt, ToTokens}; + /// # use quote::{quote, TokenStreamExt, ToTokens}; /// # use proc_macro2::TokenStream; - /// # fn main() { + /// # /// struct X; /// /// impl ToTokens for X { @@ -59,26 +31,55 @@ impl TokenStreamExt for TokenStream { /// /// let tokens = quote!(#X); /// assert_eq!(tokens.to_string(), "true false"); - /// # } /// ``` - fn append_all(&mut self, iter: I) + fn append_all(&mut self, iter: I) + where + I: IntoIterator, + I::Item: ToTokens; + + /// For use by `ToTokens` implementations. + /// + /// Appends all of the items in the iterator `I`, separated by the tokens + /// `U`. + fn append_separated(&mut self, iter: I, op: U) + where + I: IntoIterator, + I::Item: ToTokens, + U: ToTokens; + + /// For use by `ToTokens` implementations. + /// + /// Appends all tokens in the iterator `I`, appending `U` after each + /// element, including after the last element of the iterator. + fn append_terminated(&mut self, iter: I, term: U) + where + I: IntoIterator, + I::Item: ToTokens, + U: ToTokens; +} + +impl TokenStreamExt for TokenStream { + fn append(&mut self, token: U) + where + U: Into, + { + self.extend(iter::once(token.into())); + } + + fn append_all(&mut self, iter: I) where - T: ToTokens, - I: IntoIterator, + I: IntoIterator, + I::Item: ToTokens, { for token in iter { token.to_tokens(self); } } - /// For use by `ToTokens` implementations. - /// - /// Appends all of the items in the iterator `I`, separated by the tokens - /// `U`. - fn append_separated(&mut self, iter: I, op: U) + fn append_separated(&mut self, iter: I, op: U) where - T: ToTokens, - I: IntoIterator, + I: IntoIterator, + I::Item: ToTokens, U: ToTokens, { for (i, token) in iter.into_iter().enumerate() { @@ -89,14 +90,10 @@ impl TokenStreamExt for TokenStream { } } - /// For use by `ToTokens` implementations. - /// - /// Appends all tokens in the iterator `I`, appending `U` after each - /// element, including after the last element of the iterator. - fn append_terminated(&mut self, iter: I, term: U) + fn append_terminated(&mut self, iter: I, term: U) where - T: ToTokens, - I: IntoIterator, + I: IntoIterator, + I::Item: ToTokens, U: ToTokens, { for token in iter { --- /dev/null +++ b/vendor/quote/src/format.rs @@ -0,0 +1,164 @@ +/// Formatting macro for constructing `Ident`s. +/// +///
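+///
+/// A one-line sketch of the common case:
+///
+/// ```
+/// # use quote::format_ident;
+/// let ident = format_ident!("my_{}", "ident");
+/// assert_eq!(ident, "my_ident");
+/// ```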
+///
+/// # Syntax
+///
+/// Syntax is copied from the [`format!`] macro, supporting both positional and
+/// named arguments.
+///
+/// Only a limited set of formatting traits are supported. The current mapping
+/// of format types to traits is:
+///
+/// * `{}` ⇒ [`IdentFragment`]
+/// * `{:o}` ⇒ [`Octal`](`std::fmt::Octal`)
+/// * `{:x}` ⇒ [`LowerHex`](`std::fmt::LowerHex`)
+/// * `{:X}` ⇒ [`UpperHex`](`std::fmt::UpperHex`)
+/// * `{:b}` ⇒ [`Binary`](`std::fmt::Binary`)
+///
+/// See [`std::fmt`] for more information.
+///
+/// <br>
+///
+/// # IdentFragment
+///
+/// Unlike `format!`, this macro uses the [`IdentFragment`] formatting trait by
+/// default. This trait is like `Display`, with a few differences:
+///
+/// * `IdentFragment` is only implemented for a limited set of types, such as
+///   unsigned integers and strings.
+/// * [`Ident`] arguments will have their `r#` prefixes stripped, if present.
+///
+/// [`Ident`]: `proc_macro2::Ident`
+///
+/// <br>
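+///
+/// A small sketch of the `r#` stripping behavior:
+///
+/// ```
+/// # use quote::format_ident;
+/// let raw = format_ident!("r#match");
+/// // Used as a fragment, the `r#` prefix is stripped.
+/// assert_eq!(format_ident!("{}_arm", raw), "match_arm");
+/// ```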
+///
+/// # Hygiene
+///
+/// The [`Span`] of the first `Ident` argument is used as the span of the final
+/// identifier, falling back to [`Span::call_site`] when no identifiers are
+/// provided.
+///
+/// ```
+/// # use quote::format_ident;
+/// # let ident = format_ident!("Ident");
+/// // If `ident` is an Ident, the span of `my_ident` will be inherited from it.
+/// let my_ident = format_ident!("My{}{}", ident, "IsCool");
+/// assert_eq!(my_ident, "MyIdentIsCool");
+/// ```
+///
+/// Alternatively, the span can be overridden by passing the `span` named
+/// argument.
+///
+/// ```
+/// # use quote::format_ident;
+/// # const IGNORE_TOKENS: &'static str = stringify! {
+/// let my_span = /* ... */;
+/// # };
+/// # let my_span = proc_macro2::Span::call_site();
+/// format_ident!("MyIdent", span = my_span);
+/// ```
+///
+/// [`Span`]: `proc_macro2::Span`
+/// [`Span::call_site`]: `proc_macro2::Span::call_site`
+///
+/// <br>
+///
+/// # Panics
+///
+/// This method will panic if the resulting formatted string is not a valid
+/// identifier.
+///
+/// <br>
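+///
+/// For instance, this hypothetical call panics because `-` cannot appear in an
+/// identifier:
+///
+/// ```should_panic
+/// # use quote::format_ident;
+/// format_ident!("{}-check", "null");
+/// ```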
+/// +/// # Examples +/// +/// Composing raw and non-raw identifiers: +/// ``` +/// # use quote::format_ident; +/// let my_ident = format_ident!("My{}", "Ident"); +/// assert_eq!(my_ident, "MyIdent"); +/// +/// let raw = format_ident!("r#Raw"); +/// assert_eq!(raw, "r#Raw"); +/// +/// let my_ident_raw = format_ident!("{}Is{}", my_ident, raw); +/// assert_eq!(my_ident_raw, "MyIdentIsRaw"); +/// ``` +/// +/// Integer formatting options: +/// ``` +/// # use quote::format_ident; +/// let num: u32 = 10; +/// +/// let decimal = format_ident!("Id_{}", num); +/// assert_eq!(decimal, "Id_10"); +/// +/// let octal = format_ident!("Id_{:o}", num); +/// assert_eq!(octal, "Id_12"); +/// +/// let binary = format_ident!("Id_{:b}", num); +/// assert_eq!(binary, "Id_1010"); +/// +/// let lower_hex = format_ident!("Id_{:x}", num); +/// assert_eq!(lower_hex, "Id_a"); +/// +/// let upper_hex = format_ident!("Id_{:X}", num); +/// assert_eq!(upper_hex, "Id_A"); +/// ``` +#[macro_export] +macro_rules! format_ident { + ($fmt:expr) => { + $crate::format_ident_impl!([ + ::std::option::Option::None, + $fmt + ]) + }; + + ($fmt:expr, $($rest:tt)*) => { + $crate::format_ident_impl!([ + ::std::option::Option::None, + $fmt + ] $($rest)*) + }; +} + +#[macro_export] +#[doc(hidden)] +macro_rules! format_ident_impl { + // Final state + ([$span:expr, $($fmt:tt)*]) => { + $crate::__rt::mk_ident(&format!($($fmt)*), $span) + }; + + // Span argument + ([$old:expr, $($fmt:tt)*] span = $span:expr) => { + $crate::format_ident_impl!([$old, $($fmt)*] span = $span,) + }; + ([$old:expr, $($fmt:tt)*] span = $span:expr, $($rest:tt)*) => { + $crate::format_ident_impl!([ + ::std::option::Option::Some::<$crate::__rt::Span>($span), + $($fmt)* + ] $($rest)*) + }; + + // Named argument + ([$span:expr, $($fmt:tt)*] $name:ident = $arg:expr) => { + $crate::format_ident_impl!([$span, $($fmt)*] $name = $arg,) + }; + ([$span:expr, $($fmt:tt)*] $name:ident = $arg:expr, $($rest:tt)*) => { + match $crate::__rt::IdentFragmentAdapter(&$arg) { + arg => $crate::format_ident_impl!([$span.or(arg.span()), $($fmt)*, $name = arg] $($rest)*), + } + }; + + // Positional argument + ([$span:expr, $($fmt:tt)*] $arg:expr) => { + $crate::format_ident_impl!([$span, $($fmt)*] $arg,) + }; + ([$span:expr, $($fmt:tt)*] $arg:expr, $($rest:tt)*) => { + match $crate::__rt::IdentFragmentAdapter(&$arg) { + arg => $crate::format_ident_impl!([$span.or(arg.span()), $($fmt)*, arg] $($rest)*), + } + }; +} --- /dev/null +++ b/vendor/quote/src/ident_fragment.rs @@ -0,0 +1,72 @@ +use proc_macro2::{Ident, Span}; +use std::fmt; + +/// Specialized formatting trait used by `format_ident!`. +/// +/// [`Ident`] arguments formatted using this trait will have their `r#` prefix +/// stripped, if present. +/// +/// See [`format_ident!`] for more information. +pub trait IdentFragment { + /// Format this value as an identifier fragment. + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result; + + /// Span associated with this `IdentFragment`. + /// + /// If non-`None`, may be inherited by formatted identifiers. 
+    fn span(&self) -> Option<Span> {
+        None
+    }
+}
+
+impl<'a, T: IdentFragment + ?Sized> IdentFragment for &'a T {
+    fn span(&self) -> Option<Span> {
+        <T as IdentFragment>::span(*self)
+    }
+
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        IdentFragment::fmt(*self, f)
+    }
+}
+
+impl<'a, T: IdentFragment + ?Sized> IdentFragment for &'a mut T {
+    fn span(&self) -> Option<Span> {
+        <T as IdentFragment>::span(*self)
+    }
+
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        IdentFragment::fmt(*self, f)
+    }
+}
+
+impl IdentFragment for Ident {
+    fn span(&self) -> Option<Span> {
+        Some(self.span())
+    }
+
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let id = self.to_string();
+        if id.starts_with("r#") {
+            fmt::Display::fmt(&id[2..], f)
+        } else {
+            fmt::Display::fmt(&id[..], f)
+        }
+    }
+}
+
+// Limited set of types which this is implemented for, as we want to avoid types
+// which will often include non-identifier characters in their `Display` impl.
+macro_rules! ident_fragment_display {
+    ($($T:ty),*) => {
+        $(
+            impl IdentFragment for $T {
+                fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+                    fmt::Display::fmt(self, f)
+                }
+            }
+        )*
+    }
+}
+
+ident_fragment_display!(bool, str, String);
+ident_fragment_display!(u8, u16, u32, u64, u128, usize);
--- a/vendor/quote/src/lib.rs
+++ b/vendor/quote/src/lib.rs
@@ -6,8 +6,8 @@
 //! Procedural macros in Rust receive a stream of tokens as input, execute
 //! arbitrary Rust code to determine how to manipulate those tokens, and produce
 //! a stream of tokens to hand back to the compiler to compile into the caller's
-//! crate. Quasi-quoting is a solution to one piece of that -- producing tokens
-//! to return to the compiler.
+//! crate. Quasi-quoting is a solution to one piece of that — producing
+//! tokens to return to the compiler.
 //!
 //! The idea of quasi-quoting is that we write *code* that we treat as *data*.
 //! Within the `quote!` macro, we can write what looks like code to our text
@@ -21,20 +21,12 @@
 //! general-purpose Rust quasi-quoting library and is not specific to procedural
 //! macros.
 //!
-//! *Version requirement: Quote supports any compiler version back to Rust's
-//! very first support for procedural macros in Rust 1.15.0.*
-//!
 //! ```toml
 //! [dependencies]
-//! quote = "0.6"
+//! quote = "1.0"
 //! ```
 //!
-//! ```
-//! #[macro_use]
-//! extern crate quote;
-//! #
-//! # fn main() {}
-//! ```
+//! <br>
//! //! # Example //! @@ -49,21 +41,19 @@ //! [`quote_spanned!`]: macro.quote_spanned.html //! //! ``` -//! # #[macro_use] -//! # extern crate quote; +//! # use quote::quote; //! # -//! # fn main() { -//! # let generics = ""; -//! # let where_clause = ""; -//! # let field_ty = ""; -//! # let item_ty = ""; -//! # let path = ""; -//! # let value = ""; +//! # let generics = ""; +//! # let where_clause = ""; +//! # let field_ty = ""; +//! # let item_ty = ""; +//! # let path = ""; +//! # let value = ""; //! # //! let tokens = quote! { //! struct SerializeWith #generics #where_clause { //! value: &'a #field_ty, -//! phantom: ::std::marker::PhantomData<#item_ty>, +//! phantom: core::marker::PhantomData<#item_ty>, //! } //! //! impl #generics serde::Serialize for SerializeWith #generics #where_clause { @@ -77,170 +67,49 @@ //! //! SerializeWith { //! value: #value, -//! phantom: ::std::marker::PhantomData::<#item_ty>, +//! phantom: core::marker::PhantomData::<#item_ty>, //! } //! }; -//! # -//! # } //! ``` -//! -//! ## Recursion limit -//! -//! The `quote!` macro relies on deep recursion so some large invocations may -//! fail with "recursion limit reached" when you compile. If it fails, bump up -//! the recursion limit by adding `#![recursion_limit = "128"]` to your crate. -//! An even higher limit may be necessary for especially large invocations. // Quote types in rustdoc of other crates get linked to here. -#![doc(html_root_url = "https://docs.rs/quote/0.6.8")] +#![doc(html_root_url = "https://docs.rs/quote/1.0.2")] #[cfg(all( not(all(target_arch = "wasm32", target_os = "unknown")), feature = "proc-macro" ))] extern crate proc_macro; -extern crate proc_macro2; mod ext; -pub use ext::TokenStreamExt; - +mod format; +mod ident_fragment; mod to_tokens; -pub use to_tokens::ToTokens; // Not public API. #[doc(hidden)] -pub mod __rt { - use ext::TokenStreamExt; - pub use proc_macro2::*; - - fn is_ident_start(c: u8) -> bool { - (b'a' <= c && c <= b'z') || (b'A' <= c && c <= b'Z') || c == b'_' - } - - fn is_ident_continue(c: u8) -> bool { - (b'a' <= c && c <= b'z') - || (b'A' <= c && c <= b'Z') - || c == b'_' - || (b'0' <= c && c <= b'9') - } - - fn is_ident(token: &str) -> bool { - if token.bytes().all(|digit| digit >= b'0' && digit <= b'9') { - return false; - } +#[path = "runtime.rs"] +pub mod __rt; - let mut bytes = token.bytes(); - let first = bytes.next().unwrap(); - if !is_ident_start(first) { - return false; - } - for ch in bytes { - if !is_ident_continue(ch) { - return false; - } - } - true - } - - pub fn parse(tokens: &mut TokenStream, span: Span, s: &str) { - if is_ident(s) { - // Fast path, since idents are the most common token. - tokens.append(Ident::new(s, span)); - } else { - let s: TokenStream = s.parse().expect("invalid token stream"); - tokens.extend(s.into_iter().map(|mut t| { - t.set_span(span); - t - })); - } - } +pub use crate::ext::TokenStreamExt; +pub use crate::ident_fragment::IdentFragment; +pub use crate::to_tokens::ToTokens; - macro_rules! 
push_punct { - ($name:ident $char1:tt) => { - pub fn $name(tokens: &mut TokenStream, span: Span) { - let mut punct = Punct::new($char1, Spacing::Alone); - punct.set_span(span); - tokens.append(punct); - } - }; - ($name:ident $char1:tt $char2:tt) => { - pub fn $name(tokens: &mut TokenStream, span: Span) { - let mut punct = Punct::new($char1, Spacing::Joint); - punct.set_span(span); - tokens.append(punct); - let mut punct = Punct::new($char2, Spacing::Alone); - punct.set_span(span); - tokens.append(punct); - } - }; - ($name:ident $char1:tt $char2:tt $char3:tt) => { - pub fn $name(tokens: &mut TokenStream, span: Span) { - let mut punct = Punct::new($char1, Spacing::Joint); - punct.set_span(span); - tokens.append(punct); - let mut punct = Punct::new($char2, Spacing::Joint); - punct.set_span(span); - tokens.append(punct); - let mut punct = Punct::new($char3, Spacing::Alone); - punct.set_span(span); - tokens.append(punct); - } - }; - } - - push_punct!(push_add '+'); - push_punct!(push_add_eq '+' '='); - push_punct!(push_and '&'); - push_punct!(push_and_and '&' '&'); - push_punct!(push_and_eq '&' '='); - push_punct!(push_at '@'); - push_punct!(push_bang '!'); - push_punct!(push_caret '^'); - push_punct!(push_caret_eq '^' '='); - push_punct!(push_colon ':'); - push_punct!(push_colon2 ':' ':'); - push_punct!(push_comma ','); - push_punct!(push_div '/'); - push_punct!(push_div_eq '/' '='); - push_punct!(push_dot '.'); - push_punct!(push_dot2 '.' '.'); - push_punct!(push_dot3 '.' '.' '.'); - push_punct!(push_dot_dot_eq '.' '.' '='); - push_punct!(push_eq '='); - push_punct!(push_eq_eq '=' '='); - push_punct!(push_ge '>' '='); - push_punct!(push_gt '>'); - push_punct!(push_le '<' '='); - push_punct!(push_lt '<'); - push_punct!(push_mul_eq '*' '='); - push_punct!(push_ne '!' '='); - push_punct!(push_or '|'); - push_punct!(push_or_eq '|' '='); - push_punct!(push_or_or '|' '|'); - push_punct!(push_pound '#'); - push_punct!(push_question '?'); - push_punct!(push_rarrow '-' '>'); - push_punct!(push_larrow '<' '-'); - push_punct!(push_rem '%'); - push_punct!(push_rem_eq '%' '='); - push_punct!(push_fat_arrow '=' '>'); - push_punct!(push_semi ';'); - push_punct!(push_shl '<' '<'); - push_punct!(push_shl_eq '<' '<' '='); - push_punct!(push_shr '>' '>'); - push_punct!(push_shr_eq '>' '>' '='); - push_punct!(push_star '*'); - push_punct!(push_sub '-'); - push_punct!(push_sub_eq '-' '='); -} +// Not public API. +#[doc(hidden)] +pub mod spanned; /// The whole point. /// /// Performs variable interpolation against the input and produces it as -/// [`TokenStream`]. For returning tokens to the compiler in a procedural macro, use -/// `into()` to build a `TokenStream`. +/// [`proc_macro2::TokenStream`]. /// -/// [`TokenStream`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.TokenStream.html +/// Note: for returning tokens to the compiler in a procedural macro, use +/// `.into()` on the result to convert to [`proc_macro::TokenStream`]. +/// +/// [`TokenStream`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.TokenStream.html +/// +///
/// /// # Interpolation /// @@ -257,37 +126,71 @@ pub mod __rt { /// Repetition is done using `#(...)*` or `#(...),*` again similar to /// `macro_rules!`. This iterates through the elements of any variable /// interpolated within the repetition and inserts a copy of the repetition body -/// for each one. The variables in an interpolation may be anything that -/// implements `IntoIterator`, including `Vec` or a pre-existing iterator. +/// for each one. The variables in an interpolation may be a `Vec`, slice, +/// `BTreeSet`, or any `Iterator`. /// /// - `#(#var)*` — no separators /// - `#(#var),*` — the character before the asterisk is used as a separator /// - `#( struct #var; )*` — the repetition can contain other tokens /// - `#( #k => println!("{}", #v), )*` — even multiple interpolations /// +///
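[Editorial example] The repetition forms listed above can be exercised outside of a procedural macro, since `quote!` only needs proc-macro2 types. A minimal sketch, assuming quote 1.0 as a dependency; the variable name `fields` is hypothetical:

```rust
use quote::quote;

fn main() {
    // Hypothetical elements to repeat over; a Vec, slice, BTreeSet, or any
    // Iterator is accepted in the same way.
    let fields = vec!["a", "b", "c"];

    // `#(...),*` inserts one copy of the body per element, with the token
    // before the `*` (here a comma) used as the separator.
    let tokens = quote! {
        [ #( #fields ),* ]
    };

    // Prints something like: [ "a" , "b" , "c" ]
    // (exact token spacing depends on the proc-macro2 version).
    println!("{}", tokens);
}
```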
+/// /// # Hygiene /// /// Any interpolated tokens preserve the `Span` information provided by their /// `ToTokens` implementation. Tokens that originate within the `quote!` /// invocation are spanned with [`Span::call_site()`]. /// -/// [`Span::call_site()`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html#method.call_site +/// [`Span::call_site()`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.Span.html#method.call_site /// /// A different span can be provided through the [`quote_spanned!`] macro. /// /// [`quote_spanned!`]: macro.quote_spanned.html /// -/// # Example +///
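[Editorial example] A small sketch of the hygiene rule above, assuming proc-macro2 1.0: the interpolated identifier keeps the span it was constructed with, while tokens written literally inside `quote!` get `Span::call_site()`.

```rust
use proc_macro2::{Ident, Span};
use quote::quote;

fn main() {
    // `name` carries the span it was constructed with; the surrounding
    // `fn`, parentheses, and braces are spanned with Span::call_site().
    let name = Ident::new("demo", Span::call_site());
    let tokens = quote! { fn #name() {} };

    for tt in tokens {
        // Every TokenTree in the output still has a Span attached; the
        // Debug representation varies with how proc-macro2 was built.
        println!("{} spans {:?}", tt, tt.span());
    }
}
```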
+/// +/// # Return type +/// +/// The macro evaluates to an expression of type `proc_macro2::TokenStream`. +/// Meanwhile Rust procedural macros are expected to return the type +/// `proc_macro::TokenStream`. +/// +/// The difference between the two types is that `proc_macro` types are entirely +/// specific to procedural macros and cannot ever exist in code outside of a +/// procedural macro, while `proc_macro2` types may exist anywhere including +/// tests and non-macro code like main.rs and build.rs. This is why even the +/// procedural macro ecosystem is largely built around `proc_macro2`, because +/// that ensures the libraries are unit testable and accessible in non-macro +/// contexts. +/// +/// There is a [`From`]-conversion in both directions so returning the output of +/// `quote!` from a procedural macro usually looks like `tokens.into()` or +/// `proc_macro::TokenStream::from(tokens)`. +/// +/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html +/// +///
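[Editorial example] This is also what makes `quote!` output unit testable. A minimal sketch of such a test; the expected string assumes the spaced token formatting used by this crate's own test suite later in this patch:

```rust
use quote::quote;

#[test]
fn generates_expected_tokens() {
    // quote! evaluates to a proc_macro2::TokenStream, so its output can be
    // built and asserted on in an ordinary test, outside any macro expansion.
    let tokens = quote! { struct Demo; };
    assert_eq!(tokens.to_string(), "struct Demo ;");
}
```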
+/// +/// # Examples +/// +/// ### Procedural macro +/// +/// The structure of a basic procedural macro is as follows. Refer to the [Syn] +/// crate for further useful guidance on using `quote!` as part of a procedural +/// macro. +/// +/// [Syn]: https://github.com/dtolnay/syn /// /// ``` /// # #[cfg(any())] /// extern crate proc_macro; -/// # extern crate proc_macro2 as proc_macro; -/// -/// #[macro_use] -/// extern crate quote; +/// # extern crate proc_macro2; /// +/// # #[cfg(any())] /// use proc_macro::TokenStream; +/// # use proc_macro2::TokenStream; +/// use quote::quote; /// /// # const IGNORE_TOKENS: &'static str = stringify! { /// #[proc_macro_derive(HeapSize)] @@ -304,7 +207,7 @@ pub mod __rt { /// /// let expanded = quote! { /// // The generated impl. -/// impl ::heapsize::HeapSize for #name { +/// impl heapsize::HeapSize for #name { /// fn heap_size_of_children(&self) -> usize { /// #expr /// } @@ -312,35 +215,276 @@ pub mod __rt { /// }; /// /// // Hand the output tokens back to the compiler. -/// expanded.into() +/// TokenStream::from(expanded) +/// } +/// ``` +/// +///
+/// +/// ### Combining quoted fragments +/// +/// Usually you don't end up constructing an entire final `TokenStream` in one +/// piece. Different parts may come from different helper functions. The tokens +/// produced by `quote!` themselves implement `ToTokens` and so can be +/// interpolated into later `quote!` invocations to build up a final result. +/// +/// ``` +/// # use quote::quote; +/// # +/// let type_definition = quote! {...}; +/// let methods = quote! {...}; +/// +/// let tokens = quote! { +/// #type_definition +/// #methods +/// }; +/// ``` +/// +///
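[Editorial example] The `...` placeholders in the fragment example above are stand-ins; a compilable variant with hypothetical fragment contents might look like this:

```rust
use quote::quote;

fn main() {
    // Hypothetical fragments standing in for the `...` placeholders above.
    let type_definition = quote! {
        struct Demo {
            value: u32,
        }
    };
    let methods = quote! {
        impl Demo {
            fn value(&self) -> u32 {
                self.value
            }
        }
    };

    // quote! output implements ToTokens, so the fragments nest directly.
    let tokens = quote! {
        #type_definition
        #methods
    };
    println!("{}", tokens);
}
```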
+/// +/// ### Constructing identifiers +/// +/// Suppose we have an identifier `ident` which came from somewhere in a macro +/// input and we need to modify it in some way for the macro output. Let's +/// consider prepending the identifier with an underscore. +/// +/// Simply interpolating the identifier next to an underscore will not have the +/// behavior of concatenating them. The underscore and the identifier will +/// continue to be two separate tokens as if you had written `_ x`. +/// +/// ``` +/// # use proc_macro2::{self as syn, Span}; +/// # use quote::quote; +/// # +/// # let ident = syn::Ident::new("i", Span::call_site()); +/// # +/// // incorrect +/// quote! { +/// let mut _#ident = 0; +/// } +/// # ; +/// ``` +/// +/// The solution is to build a new identifier token with the correct value. As +/// this is such a common case, the [`format_ident!`] macro provides a +/// convenient utility for doing so correctly. +/// +/// ``` +/// # use proc_macro2::{Ident, Span}; +/// # use quote::{format_ident, quote}; +/// # +/// # let ident = Ident::new("i", Span::call_site()); +/// # +/// let varname = format_ident!("_{}", ident); +/// quote! { +/// let mut #varname = 0; +/// } +/// # ; +/// ``` +/// +/// Alternatively, the APIs provided by Syn and proc-macro2 can be used to +/// directly build the identifier. This is roughly equivalent to the above, but +/// will not handle `ident` being a raw identifier. +/// +/// ``` +/// # use proc_macro2::{self as syn, Span}; +/// # use quote::quote; +/// # +/// # let ident = syn::Ident::new("i", Span::call_site()); +/// # +/// let concatenated = format!("_{}", ident); +/// let varname = syn::Ident::new(&concatenated, ident.span()); +/// quote! { +/// let mut #varname = 0; +/// } +/// # ; +/// ``` +/// +///
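[Editorial example] The raw-identifier caveat above can be seen concretely; this sketch mirrors the `test_format_ident_strip_raw` test added later in this patch:

```rust
use quote::format_ident;

fn main() {
    // format_ident! understands the `r#` prefix and strips it before
    // formatting, which the manual `Ident::new` approach above would
    // reject as an invalid identifier name.
    let id = format_ident!("r#struct");
    let prefixed = format_ident!("_{}", id);
    assert_eq!(prefixed, "_struct");
}
```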
+/// +/// ### Making method calls +/// +/// Let's say our macro requires some type specified in the macro input to have +/// a constructor called `new`. We have the type in a variable called +/// `field_type` of type `syn::Type` and want to invoke the constructor. +/// +/// ``` +/// # use quote::quote; +/// # +/// # let field_type = quote!(...); +/// # +/// // incorrect +/// quote! { +/// let value = #field_type::new(); /// } +/// # ; +/// ``` +/// +/// This works only sometimes. If `field_type` is `String`, the expanded code +/// contains `String::new()` which is fine. But if `field_type` is something +/// like `Vec` then the expanded code is `Vec::new()` which is invalid +/// syntax. Ordinarily in handwritten Rust we would write `Vec::::new()` +/// but for macros often the following is more convenient. +/// +/// ``` +/// # use quote::quote; /// # -/// # fn main() {} +/// # let field_type = quote!(...); +/// # +/// quote! { +/// let value = <#field_type>::new(); +/// } +/// # ; /// ``` -#[macro_export(local_inner_macros)] +/// +/// This expands to `>::new()` which behaves correctly. +/// +/// A similar pattern is appropriate for trait methods. +/// +/// ``` +/// # use quote::quote; +/// # +/// # let field_type = quote!(...); +/// # +/// quote! { +/// let value = <#field_type as core::default::Default>::default(); +/// } +/// # ; +/// ``` +/// +///
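[Editorial example] A concrete variant of the angle-bracket pattern above, with a hypothetical generic field type substituted for the `...` placeholder:

```rust
use quote::quote;

fn main() {
    // Hypothetical field type that requires the angle-bracket form,
    // since plain `Vec<u8>::new()` would not parse.
    let field_type = quote!(Vec<u8>);

    let tokens = quote! {
        let value = <#field_type>::new();
    };

    // Prints something like: let value = < Vec < u8 > > :: new ( ) ;
    println!("{}", tokens);
}
```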
+/// +/// ### Interpolating text inside of doc comments +/// +/// Neither doc comments nor string literals get interpolation behavior in +/// quote: +/// +/// ```compile_fail +/// quote! { +/// /// try to interpolate: #ident +/// /// +/// /// ... +/// } +/// ``` +/// +/// ```compile_fail +/// quote! { +/// #[doc = "try to interpolate: #ident"] +/// } +/// ``` +/// +/// Macro calls in a doc attribute are not valid syntax: +/// +/// ```compile_fail +/// quote! { +/// #[doc = concat!("try to interpolate: ", stringify!(#ident))] +/// } +/// ``` +/// +/// Instead the best way to build doc comments that involve variables is by +/// formatting the doc string literal outside of quote. +/// +/// ```rust +/// # use proc_macro2::{Ident, Span}; +/// # use quote::quote; +/// # +/// # const IGNORE: &str = stringify! { +/// let msg = format!(...); +/// # }; +/// # +/// # let ident = Ident::new("var", Span::call_site()); +/// # let msg = format!("try to interpolate: {}", ident); +/// quote! { +/// #[doc = #msg] +/// /// +/// /// ... +/// } +/// # ; +/// ``` +/// +///
+/// +/// ### Indexing into a tuple struct +/// +/// When interpolating indices of a tuple or tuple struct, we need them not to +/// appears suffixed as integer literals by interpolating them as [`syn::Index`] +/// instead. +/// +/// [`syn::Index`]: https://docs.rs/syn/1.0/syn/struct.Index.html +/// +/// ```compile_fail +/// let i = 0usize..self.fields.len(); +/// +/// // expands to 0 + self.0usize.heap_size() + self.1usize.heap_size() + ... +/// // which is not valid syntax +/// quote! { +/// 0 #( + self.#i.heap_size() )* +/// } +/// ``` +/// +/// ``` +/// # use proc_macro2::{Ident, TokenStream}; +/// # use quote::quote; +/// # +/// # mod syn { +/// # use proc_macro2::{Literal, TokenStream}; +/// # use quote::{ToTokens, TokenStreamExt}; +/// # +/// # pub struct Index(usize); +/// # +/// # impl From for Index { +/// # fn from(i: usize) -> Self { +/// # Index(i) +/// # } +/// # } +/// # +/// # impl ToTokens for Index { +/// # fn to_tokens(&self, tokens: &mut TokenStream) { +/// # tokens.append(Literal::usize_unsuffixed(self.0)); +/// # } +/// # } +/// # } +/// # +/// # struct Struct { +/// # fields: Vec, +/// # } +/// # +/// # impl Struct { +/// # fn example(&self) -> TokenStream { +/// let i = (0..self.fields.len()).map(syn::Index::from); +/// +/// // expands to 0 + self.0.heap_size() + self.1.heap_size() + ... +/// quote! { +/// 0 #( + self.#i.heap_size() )* +/// } +/// # } +/// # } +/// ``` +#[macro_export] macro_rules! quote { - ($($tt:tt)*) => (quote_spanned!($crate::__rt::Span::call_site()=> $($tt)*)); + ($($tt:tt)*) => { + $crate::quote_spanned!($crate::__rt::Span::call_site()=> $($tt)*) + }; } /// Same as `quote!`, but applies a given span to all tokens originating within /// the macro invocation. /// +///
+/// /// # Syntax /// /// A span expression of type [`Span`], followed by `=>`, followed by the tokens -/// to quote. The span expression should be brief -- use a variable for anything -/// more than a few characters. There should be no space before the `=>` token. +/// to quote. The span expression should be brief — use a variable for +/// anything more than a few characters. There should be no space before the +/// `=>` token. /// -/// [`Span`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html +/// [`Span`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.Span.html /// /// ``` -/// # #[macro_use] -/// # extern crate quote; -/// # extern crate proc_macro2; -/// # /// # use proc_macro2::Span; +/// # use quote::quote_spanned; /// # -/// # fn main() { /// # const IGNORE_TOKENS: &'static str = stringify! { /// let span = /* ... */; /// # }; @@ -354,7 +498,6 @@ macro_rules! quote { /// let tokens = quote_spanned! {span=> /// Box::into_raw(Box::new(#init)) /// }; -/// # } /// ``` /// /// The lack of space before the `=>` should look jarring to Rust programmers @@ -363,12 +506,16 @@ macro_rules! quote { /// being evaluated in the context of the procedural macro and the remaining /// tokens being evaluated in the generated code. /// +///
+/// /// # Hygiene /// /// Any interpolated tokens preserve the `Span` information provided by their /// `ToTokens` implementation. Tokens that originate within the `quote_spanned!` /// invocation are spanned with the given span argument. /// +///
+/// /// # Example /// /// The following procedural macro code uses `quote_spanned!` to assert that a @@ -378,11 +525,7 @@ macro_rules! quote { /// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html /// /// ``` -/// # #[macro_use] -/// # extern crate quote; -/// # extern crate proc_macro2; -/// # -/// # use quote::{TokenStreamExt, ToTokens}; +/// # use quote::{quote_spanned, TokenStreamExt, ToTokens}; /// # use proc_macro2::{Span, TokenStream}; /// # /// # struct Type; @@ -397,7 +540,6 @@ macro_rules! quote { /// # fn to_tokens(&self, _tokens: &mut TokenStream) {} /// # } /// # -/// # fn main() { /// # let ty = Type; /// # let call_site = Span::call_site(); /// # @@ -405,7 +547,6 @@ macro_rules! quote { /// let assert_sync = quote_spanned! {ty_span=> /// struct _AssertSync where #ty: Sync; /// }; -/// # } /// ``` /// /// If the assertion fails, the user will see an error like the following. The @@ -421,438 +562,387 @@ macro_rules! quote { /// /// In this example it is important for the where-clause to be spanned with the /// line/column information of the user's input type so that error messages are -/// placed appropriately by the compiler. But it is also incredibly important -/// that `Sync` resolves at the macro definition site and not the macro call -/// site. If we resolve `Sync` at the same span that the user's type is going to -/// be resolved, then they could bypass our check by defining their own trait -/// named `Sync` that is implemented for their type. -#[macro_export(local_inner_macros)] +/// placed appropriately by the compiler. +#[macro_export] macro_rules! quote_spanned { - ($span:expr=> $($tt:tt)*) => { - { - let mut _s = $crate::__rt::TokenStream::new(); - let _span = $span; - quote_each_token!(_s _span $($tt)*); - _s - } - }; + ($span:expr=> $($tt:tt)*) => {{ + let mut _s = $crate::__rt::TokenStream::new(); + let _span: $crate::__rt::Span = $span; + $crate::quote_each_token!(_s _span $($tt)*); + _s + }}; } -// Extract the names of all #metavariables and pass them to the $finish macro. +// Extract the names of all #metavariables and pass them to the $call macro. // -// in: pounded_var_names!(then () a #b c #( #d )* #e) -// out: then!(() b d e) -#[macro_export(local_inner_macros)] +// in: pounded_var_names!(then!(...) a #b c #( #d )* #e) +// out: then!(... b); +// then!(... d); +// then!(... e); +#[macro_export] #[doc(hidden)] macro_rules! pounded_var_names { - ($finish:ident ($($found:ident)*) # ( $($inner:tt)* ) $($rest:tt)*) => { - pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) - }; - - ($finish:ident ($($found:ident)*) # [ $($inner:tt)* ] $($rest:tt)*) => { - pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) - }; - - ($finish:ident ($($found:ident)*) # { $($inner:tt)* } $($rest:tt)*) => { - pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) + ($call:ident! $extra:tt $($tts:tt)*) => { + $crate::pounded_var_names_with_context!($call! $extra + (@ $($tts)*) + ($($tts)* @) + ) }; +} - ($finish:ident ($($found:ident)*) # $first:ident $($rest:tt)*) => { - pounded_var_names!($finish ($($found)* $first) $($rest)*) +#[macro_export] +#[doc(hidden)] +macro_rules! pounded_var_names_with_context { + ($call:ident! $extra:tt ($($b1:tt)*) ($($curr:tt)*)) => { + $( + $crate::pounded_var_with_context!($call! $extra $b1 $curr); + )* }; +} - ($finish:ident ($($found:ident)*) ( $($inner:tt)* ) $($rest:tt)*) => { - pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) +#[macro_export] +#[doc(hidden)] +macro_rules! 
pounded_var_with_context { + ($call:ident! $extra:tt $b1:tt ( $($inner:tt)* )) => { + $crate::pounded_var_names!($call! $extra $($inner)*); }; - ($finish:ident ($($found:ident)*) [ $($inner:tt)* ] $($rest:tt)*) => { - pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) + ($call:ident! $extra:tt $b1:tt [ $($inner:tt)* ]) => { + $crate::pounded_var_names!($call! $extra $($inner)*); }; - ($finish:ident ($($found:ident)*) { $($inner:tt)* } $($rest:tt)*) => { - pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) + ($call:ident! $extra:tt $b1:tt { $($inner:tt)* }) => { + $crate::pounded_var_names!($call! $extra $($inner)*); }; - ($finish:ident ($($found:ident)*) $ignore:tt $($rest:tt)*) => { - pounded_var_names!($finish ($($found)*) $($rest)*) + ($call:ident!($($extra:tt)*) # $var:ident) => { + $crate::$call!($($extra)* $var); }; - ($finish:ident ($($found:ident)*)) => { - $finish!(() $($found)*) - }; + ($call:ident! $extra:tt $b1:tt $curr:tt) => {}; } -// in: nested_tuples_pat!(() a b c d e) -// out: ((((a b) c) d) e) -// -// in: nested_tuples_pat!(() a) -// out: a -#[macro_export(local_inner_macros)] +#[macro_export] #[doc(hidden)] -macro_rules! nested_tuples_pat { - (()) => { - &() - }; - - (() $first:ident $($rest:ident)*) => { - nested_tuples_pat!(($first) $($rest)*) - }; - - (($pat:pat) $first:ident $($rest:ident)*) => { - nested_tuples_pat!((($pat, $first)) $($rest)*) - }; - - (($done:pat)) => { - $done +macro_rules! quote_bind_into_iter { + ($has_iter:ident $var:ident) => { + // `mut` may be unused if $var occurs multiple times in the list. + #[allow(unused_mut)] + let (mut $var, i) = $var.quote_into_iter(); + let $has_iter = $has_iter | i; }; } -// in: multi_zip_expr!(() a b c d e) -// out: a.into_iter().zip(b).zip(c).zip(d).zip(e) -// -// in: multi_zip_iter!(() a) -// out: a -#[macro_export(local_inner_macros)] +#[macro_export] #[doc(hidden)] -macro_rules! multi_zip_expr { - (()) => { - &[] - }; - - (() $single:ident) => { - $single - }; - - (() $first:ident $($rest:ident)*) => { - multi_zip_expr!(($first.into_iter()) $($rest)*) +macro_rules! quote_bind_next_or_break { + ($var:ident) => { + let $var = match $var.next() { + Some(_x) => $crate::__rt::RepInterp(_x), + None => break, + }; }; +} - (($zips:expr) $first:ident $($rest:ident)*) => { - multi_zip_expr!(($zips.zip($first)) $($rest)*) +#[macro_export] +#[doc(hidden)] +macro_rules! quote_each_token { + ($tokens:ident $span:ident $($tts:tt)*) => { + $crate::quote_tokens_with_context!($tokens $span + (@ @ @ @ @ @ $($tts)*) + (@ @ @ @ @ $($tts)* @) + (@ @ @ @ $($tts)* @ @) + (@ @ @ $(($tts))* @ @ @) + (@ @ $($tts)* @ @ @ @) + (@ $($tts)* @ @ @ @ @) + ($($tts)* @ @ @ @ @ @) + ); }; +} - (($done:expr)) => { - $done +#[macro_export] +#[doc(hidden)] +macro_rules! quote_tokens_with_context { + ($tokens:ident $span:ident + ($($b3:tt)*) ($($b2:tt)*) ($($b1:tt)*) + ($($curr:tt)*) + ($($a1:tt)*) ($($a2:tt)*) ($($a3:tt)*) + ) => { + $( + $crate::quote_token_with_context!($tokens $span $b3 $b2 $b1 $curr $a1 $a2 $a3); + )* }; } -#[macro_export(local_inner_macros)] +#[macro_export] #[doc(hidden)] -macro_rules! quote_each_token { - ($tokens:ident $span:ident) => {}; +macro_rules! quote_token_with_context { + ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt @ $a1:tt $a2:tt $a3:tt) => {}; - ($tokens:ident $span:ident # ! 
$($rest:tt)*) => { - quote_each_token!($tokens $span #); - quote_each_token!($tokens $span !); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident # ( $($inner:tt)* ) * $($rest:tt)*) => { - for pounded_var_names!(nested_tuples_pat () $($inner)*) - in pounded_var_names!(multi_zip_expr () $($inner)*) { - quote_each_token!($tokens $span $($inner)*); + ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) * $a3:tt) => {{ + use $crate::__rt::ext::*; + let has_iter = $crate::__rt::ThereIsNoIteratorInRepetition; + $crate::pounded_var_names!(quote_bind_into_iter!(has_iter) () $($inner)*); + let _: $crate::__rt::HasIterator = has_iter; + // This is `while true` instead of `loop` because if there are no + // iterators used inside of this repetition then the body would not + // contain any `break`, so the compiler would emit unreachable code + // warnings on anything below the loop. We use has_iter to detect and + // fail to compile when there are no iterators, so here we just work + // around the unneeded extra warning. + while true { + $crate::pounded_var_names!(quote_bind_next_or_break!() () $($inner)*); + $crate::quote_each_token!($tokens $span $($inner)*); } - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident # ( $($inner:tt)* ) $sep:tt * $($rest:tt)*) => { - for (_i, pounded_var_names!(nested_tuples_pat () $($inner)*)) - in pounded_var_names!(multi_zip_expr () $($inner)*).into_iter().enumerate() { + }}; + ($tokens:ident $span:ident $b3:tt $b2:tt # (( $($inner:tt)* )) * $a2:tt $a3:tt) => {}; + ($tokens:ident $span:ident $b3:tt # ( $($inner:tt)* ) (*) $a1:tt $a2:tt $a3:tt) => {}; + + ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) $sep:tt *) => {{ + use $crate::__rt::ext::*; + let mut _i = 0usize; + let has_iter = $crate::__rt::ThereIsNoIteratorInRepetition; + $crate::pounded_var_names!(quote_bind_into_iter!(has_iter) () $($inner)*); + let _: $crate::__rt::HasIterator = has_iter; + while true { + $crate::pounded_var_names!(quote_bind_next_or_break!() () $($inner)*); if _i > 0 { - quote_each_token!($tokens $span $sep); + $crate::quote_token!($tokens $span $sep); } - quote_each_token!($tokens $span $($inner)*); + _i += 1; + $crate::quote_each_token!($tokens $span $($inner)*); } - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident # [ $($inner:tt)* ] $($rest:tt)*) => { - quote_each_token!($tokens $span #); - $tokens.extend({ - let mut g = $crate::__rt::Group::new( - $crate::__rt::Delimiter::Bracket, - quote_spanned!($span=> $($inner)*), - ); - g.set_span($span); - Some($crate::__rt::TokenTree::from(g)) - }); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident # $first:ident $($rest:tt)*) => { - $crate::ToTokens::to_tokens(&$first, &mut $tokens); - quote_each_token!($tokens $span $($rest)*); + }}; + ($tokens:ident $span:ident $b3:tt $b2:tt # (( $($inner:tt)* )) $sep:tt * $a3:tt) => {}; + ($tokens:ident $span:ident $b3:tt # ( $($inner:tt)* ) ($sep:tt) * $a2:tt $a3:tt) => {}; + ($tokens:ident $span:ident # ( $($inner:tt)* ) * (*) $a1:tt $a2:tt $a3:tt) => { + // https://github.com/dtolnay/quote/issues/130 + $crate::quote_token!($tokens $span *); + }; + ($tokens:ident $span:ident # ( $($inner:tt)* ) $sep:tt (*) $a1:tt $a2:tt $a3:tt) => {}; + + ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) $var:ident $a2:tt $a3:tt) => { + $crate::ToTokens::to_tokens(&$var, &mut $tokens); + }; + ($tokens:ident $span:ident $b3:tt $b2:tt # ($var:ident) $a1:tt 
$a2:tt $a3:tt) => {}; + ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt ($curr:tt) $a1:tt $a2:tt $a3:tt) => { + $crate::quote_token!($tokens $span $curr); }; +} - ($tokens:ident $span:ident ( $($first:tt)* ) $($rest:tt)*) => { +#[macro_export] +#[doc(hidden)] +macro_rules! quote_token { + ($tokens:ident $span:ident ( $($inner:tt)* )) => { $tokens.extend({ let mut g = $crate::__rt::Group::new( $crate::__rt::Delimiter::Parenthesis, - quote_spanned!($span=> $($first)*), + $crate::quote_spanned!($span=> $($inner)*), ); g.set_span($span); Some($crate::__rt::TokenTree::from(g)) }); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident [ $($first:tt)* ] $($rest:tt)*) => { + ($tokens:ident $span:ident [ $($inner:tt)* ]) => { $tokens.extend({ let mut g = $crate::__rt::Group::new( $crate::__rt::Delimiter::Bracket, - quote_spanned!($span=> $($first)*), + $crate::quote_spanned!($span=> $($inner)*), ); g.set_span($span); Some($crate::__rt::TokenTree::from(g)) }); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident { $($first:tt)* } $($rest:tt)*) => { + ($tokens:ident $span:ident { $($inner:tt)* }) => { $tokens.extend({ let mut g = $crate::__rt::Group::new( $crate::__rt::Delimiter::Brace, - quote_spanned!($span=> $($first)*), + $crate::quote_spanned!($span=> $($inner)*), ); g.set_span($span); Some($crate::__rt::TokenTree::from(g)) }); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident + $($rest:tt)*) => { + ($tokens:ident $span:ident +) => { $crate::__rt::push_add(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident += $($rest:tt)*) => { + ($tokens:ident $span:ident +=) => { $crate::__rt::push_add_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident & $($rest:tt)*) => { + ($tokens:ident $span:ident &) => { $crate::__rt::push_and(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident && $($rest:tt)*) => { + ($tokens:ident $span:ident &&) => { $crate::__rt::push_and_and(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident &= $($rest:tt)*) => { + ($tokens:ident $span:ident &=) => { $crate::__rt::push_and_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident @ $($rest:tt)*) => { + ($tokens:ident $span:ident @) => { $crate::__rt::push_at(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident ! $($rest:tt)*) => { + ($tokens:ident $span:ident !) 
=> { $crate::__rt::push_bang(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident ^ $($rest:tt)*) => { + ($tokens:ident $span:ident ^) => { $crate::__rt::push_caret(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident ^= $($rest:tt)*) => { + ($tokens:ident $span:ident ^=) => { $crate::__rt::push_caret_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident : $($rest:tt)*) => { + ($tokens:ident $span:ident :) => { $crate::__rt::push_colon(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident :: $($rest:tt)*) => { + ($tokens:ident $span:ident ::) => { $crate::__rt::push_colon2(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident , $($rest:tt)*) => { + ($tokens:ident $span:ident ,) => { $crate::__rt::push_comma(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident / $($rest:tt)*) => { + ($tokens:ident $span:ident /) => { $crate::__rt::push_div(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident /= $($rest:tt)*) => { + ($tokens:ident $span:ident /=) => { $crate::__rt::push_div_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident . $($rest:tt)*) => { + ($tokens:ident $span:ident .) => { $crate::__rt::push_dot(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident .. $($rest:tt)*) => { + ($tokens:ident $span:ident ..) => { $crate::__rt::push_dot2(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident ... $($rest:tt)*) => { + ($tokens:ident $span:ident ...) 
=> { $crate::__rt::push_dot3(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident ..= $($rest:tt)*) => { + ($tokens:ident $span:ident ..=) => { $crate::__rt::push_dot_dot_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident = $($rest:tt)*) => { + ($tokens:ident $span:ident =) => { $crate::__rt::push_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident == $($rest:tt)*) => { + ($tokens:ident $span:ident ==) => { $crate::__rt::push_eq_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident >= $($rest:tt)*) => { + ($tokens:ident $span:ident >=) => { $crate::__rt::push_ge(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident > $($rest:tt)*) => { + ($tokens:ident $span:ident >) => { $crate::__rt::push_gt(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident <= $($rest:tt)*) => { + ($tokens:ident $span:ident <=) => { $crate::__rt::push_le(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident < $($rest:tt)*) => { + ($tokens:ident $span:ident <) => { $crate::__rt::push_lt(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident *= $($rest:tt)*) => { + ($tokens:ident $span:ident *=) => { $crate::__rt::push_mul_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident != $($rest:tt)*) => { + ($tokens:ident $span:ident !=) => { $crate::__rt::push_ne(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident | $($rest:tt)*) => { + ($tokens:ident $span:ident |) => { $crate::__rt::push_or(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident |= $($rest:tt)*) => { + ($tokens:ident $span:ident |=) => { $crate::__rt::push_or_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident || $($rest:tt)*) => { + ($tokens:ident $span:ident ||) => { $crate::__rt::push_or_or(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident # $($rest:tt)*) => { + ($tokens:ident $span:ident #) => { $crate::__rt::push_pound(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident ? $($rest:tt)*) => { + ($tokens:ident $span:ident ?) 
=> { $crate::__rt::push_question(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident -> $($rest:tt)*) => { + ($tokens:ident $span:ident ->) => { $crate::__rt::push_rarrow(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident <- $($rest:tt)*) => { + ($tokens:ident $span:ident <-) => { $crate::__rt::push_larrow(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident % $($rest:tt)*) => { + ($tokens:ident $span:ident %) => { $crate::__rt::push_rem(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident %= $($rest:tt)*) => { + ($tokens:ident $span:ident %=) => { $crate::__rt::push_rem_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident => $($rest:tt)*) => { + ($tokens:ident $span:ident =>) => { $crate::__rt::push_fat_arrow(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident ; $($rest:tt)*) => { + ($tokens:ident $span:ident ;) => { $crate::__rt::push_semi(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident << $($rest:tt)*) => { + ($tokens:ident $span:ident <<) => { $crate::__rt::push_shl(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident <<= $($rest:tt)*) => { + ($tokens:ident $span:ident <<=) => { $crate::__rt::push_shl_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident >> $($rest:tt)*) => { + ($tokens:ident $span:ident >>) => { $crate::__rt::push_shr(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident >>= $($rest:tt)*) => { + ($tokens:ident $span:ident >>=) => { $crate::__rt::push_shr_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident * $($rest:tt)*) => { + ($tokens:ident $span:ident *) => { $crate::__rt::push_star(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident - $($rest:tt)*) => { + ($tokens:ident $span:ident -) => { $crate::__rt::push_sub(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); }; - ($tokens:ident $span:ident -= $($rest:tt)*) => { + ($tokens:ident $span:ident -=) => { $crate::__rt::push_sub_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident $first:tt $($rest:tt)*) => { - $crate::__rt::parse(&mut $tokens, $span, quote_stringify!($first)); - quote_each_token!($tokens $span $($rest)*); }; -} -// Unhygienically invoke whatever `stringify` the caller has in scope i.e. not a -// local macro. The macros marked `local_inner_macros` above cannot invoke -// `stringify` directly. -#[macro_export] -#[doc(hidden)] -macro_rules! 
quote_stringify { - ($tt:tt) => { - stringify!($tt) + ($tokens:ident $span:ident $other:tt) => { + $crate::__rt::parse(&mut $tokens, $span, stringify!($other)); }; } --- /dev/null +++ b/vendor/quote/src/runtime.rs @@ -0,0 +1,373 @@ +use crate::{IdentFragment, ToTokens, TokenStreamExt}; +use std::fmt; +use std::ops::BitOr; + +pub use proc_macro2::*; + +pub struct HasIterator; // True +pub struct ThereIsNoIteratorInRepetition; // False + +impl BitOr for ThereIsNoIteratorInRepetition { + type Output = ThereIsNoIteratorInRepetition; + fn bitor(self, _rhs: ThereIsNoIteratorInRepetition) -> ThereIsNoIteratorInRepetition { + ThereIsNoIteratorInRepetition + } +} + +impl BitOr for HasIterator { + type Output = HasIterator; + fn bitor(self, _rhs: ThereIsNoIteratorInRepetition) -> HasIterator { + HasIterator + } +} + +impl BitOr for ThereIsNoIteratorInRepetition { + type Output = HasIterator; + fn bitor(self, _rhs: HasIterator) -> HasIterator { + HasIterator + } +} + +impl BitOr for HasIterator { + type Output = HasIterator; + fn bitor(self, _rhs: HasIterator) -> HasIterator { + HasIterator + } +} + +/// Extension traits used by the implementation of `quote!`. These are defined +/// in separate traits, rather than as a single trait due to ambiguity issues. +/// +/// These traits expose a `quote_into_iter` method which should allow calling +/// whichever impl happens to be applicable. Calling that method repeatedly on +/// the returned value should be idempotent. +pub mod ext { + use super::RepInterp; + use super::{HasIterator as HasIter, ThereIsNoIteratorInRepetition as DoesNotHaveIter}; + use crate::ToTokens; + use std::collections::btree_set::{self, BTreeSet}; + use std::slice; + + /// Extension trait providing the `quote_into_iter` method on iterators. + pub trait RepIteratorExt: Iterator + Sized { + fn quote_into_iter(self) -> (Self, HasIter) { + (self, HasIter) + } + } + + impl RepIteratorExt for T {} + + /// Extension trait providing the `quote_into_iter` method for + /// non-iterable types. These types interpolate the same value in each + /// iteration of the repetition. + pub trait RepToTokensExt { + /// Pretend to be an iterator for the purposes of `quote_into_iter`. + /// This allows repeated calls to `quote_into_iter` to continue + /// correctly returning DoesNotHaveIter. + fn next(&self) -> Option<&Self> { + Some(self) + } + + fn quote_into_iter(&self) -> (&Self, DoesNotHaveIter) { + (self, DoesNotHaveIter) + } + } + + impl RepToTokensExt for T {} + + /// Extension trait providing the `quote_into_iter` method for types that + /// can be referenced as an iterator. 
+ pub trait RepAsIteratorExt<'q> { + type Iter: Iterator; + + fn quote_into_iter(&'q self) -> (Self::Iter, HasIter); + } + + impl<'q, 'a, T: RepAsIteratorExt<'q> + ?Sized> RepAsIteratorExt<'q> for &'a T { + type Iter = T::Iter; + + fn quote_into_iter(&'q self) -> (Self::Iter, HasIter) { + ::quote_into_iter(*self) + } + } + + impl<'q, 'a, T: RepAsIteratorExt<'q> + ?Sized> RepAsIteratorExt<'q> for &'a mut T { + type Iter = T::Iter; + + fn quote_into_iter(&'q self) -> (Self::Iter, HasIter) { + ::quote_into_iter(*self) + } + } + + impl<'q, T: 'q> RepAsIteratorExt<'q> for [T] { + type Iter = slice::Iter<'q, T>; + + fn quote_into_iter(&'q self) -> (Self::Iter, HasIter) { + (self.iter(), HasIter) + } + } + + impl<'q, T: 'q> RepAsIteratorExt<'q> for Vec { + type Iter = slice::Iter<'q, T>; + + fn quote_into_iter(&'q self) -> (Self::Iter, HasIter) { + (self.iter(), HasIter) + } + } + + impl<'q, T: 'q> RepAsIteratorExt<'q> for BTreeSet { + type Iter = btree_set::Iter<'q, T>; + + fn quote_into_iter(&'q self) -> (Self::Iter, HasIter) { + (self.iter(), HasIter) + } + } + + macro_rules! array_rep_slice { + ($($l:tt)*) => { + $( + impl<'q, T: 'q> RepAsIteratorExt<'q> for [T; $l] { + type Iter = slice::Iter<'q, T>; + + fn quote_into_iter(&'q self) -> (Self::Iter, HasIter) { + (self.iter(), HasIter) + } + } + )* + } + } + + array_rep_slice!( + 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 + 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 + ); + + impl<'q, T: RepAsIteratorExt<'q>> RepAsIteratorExt<'q> for RepInterp { + type Iter = T::Iter; + + fn quote_into_iter(&'q self) -> (Self::Iter, HasIter) { + self.0.quote_into_iter() + } + } +} + +// Helper type used within interpolations to allow for repeated binding names. +// Implements the relevant traits, and exports a dummy `next()` method. +#[derive(Copy, Clone)] +pub struct RepInterp(pub T); + +impl RepInterp { + // This method is intended to look like `Iterator::next`, and is called when + // a name is bound multiple times, as the previous binding will shadow the + // original `Iterator` object. This allows us to avoid advancing the + // iterator multiple times per iteration. + pub fn next(self) -> Option { + Some(self.0) + } +} + +impl Iterator for RepInterp { + type Item = T::Item; + + fn next(&mut self) -> Option { + self.0.next() + } +} + +impl ToTokens for RepInterp { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.0.to_tokens(tokens); + } +} + +fn is_ident_start(c: u8) -> bool { + (b'a' <= c && c <= b'z') || (b'A' <= c && c <= b'Z') || c == b'_' +} + +fn is_ident_continue(c: u8) -> bool { + (b'a' <= c && c <= b'z') || (b'A' <= c && c <= b'Z') || c == b'_' || (b'0' <= c && c <= b'9') +} + +fn is_ident(token: &str) -> bool { + let mut iter = token.bytes(); + let first_ok = iter.next().map(is_ident_start).unwrap_or(false); + + first_ok && iter.all(is_ident_continue) +} + +pub fn parse(tokens: &mut TokenStream, span: Span, s: &str) { + if is_ident(s) { + // Fast path, since idents are the most common token. + tokens.append(Ident::new(s, span)); + } else { + let s: TokenStream = s.parse().expect("invalid token stream"); + tokens.extend(s.into_iter().map(|mut t| { + t.set_span(span); + t + })); + } +} + +macro_rules! 
push_punct { + ($name:ident $char1:tt) => { + pub fn $name(tokens: &mut TokenStream, span: Span) { + let mut punct = Punct::new($char1, Spacing::Alone); + punct.set_span(span); + tokens.append(punct); + } + }; + ($name:ident $char1:tt $char2:tt) => { + pub fn $name(tokens: &mut TokenStream, span: Span) { + let mut punct = Punct::new($char1, Spacing::Joint); + punct.set_span(span); + tokens.append(punct); + let mut punct = Punct::new($char2, Spacing::Alone); + punct.set_span(span); + tokens.append(punct); + } + }; + ($name:ident $char1:tt $char2:tt $char3:tt) => { + pub fn $name(tokens: &mut TokenStream, span: Span) { + let mut punct = Punct::new($char1, Spacing::Joint); + punct.set_span(span); + tokens.append(punct); + let mut punct = Punct::new($char2, Spacing::Joint); + punct.set_span(span); + tokens.append(punct); + let mut punct = Punct::new($char3, Spacing::Alone); + punct.set_span(span); + tokens.append(punct); + } + }; +} + +push_punct!(push_add '+'); +push_punct!(push_add_eq '+' '='); +push_punct!(push_and '&'); +push_punct!(push_and_and '&' '&'); +push_punct!(push_and_eq '&' '='); +push_punct!(push_at '@'); +push_punct!(push_bang '!'); +push_punct!(push_caret '^'); +push_punct!(push_caret_eq '^' '='); +push_punct!(push_colon ':'); +push_punct!(push_colon2 ':' ':'); +push_punct!(push_comma ','); +push_punct!(push_div '/'); +push_punct!(push_div_eq '/' '='); +push_punct!(push_dot '.'); +push_punct!(push_dot2 '.' '.'); +push_punct!(push_dot3 '.' '.' '.'); +push_punct!(push_dot_dot_eq '.' '.' '='); +push_punct!(push_eq '='); +push_punct!(push_eq_eq '=' '='); +push_punct!(push_ge '>' '='); +push_punct!(push_gt '>'); +push_punct!(push_le '<' '='); +push_punct!(push_lt '<'); +push_punct!(push_mul_eq '*' '='); +push_punct!(push_ne '!' '='); +push_punct!(push_or '|'); +push_punct!(push_or_eq '|' '='); +push_punct!(push_or_or '|' '|'); +push_punct!(push_pound '#'); +push_punct!(push_question '?'); +push_punct!(push_rarrow '-' '>'); +push_punct!(push_larrow '<' '-'); +push_punct!(push_rem '%'); +push_punct!(push_rem_eq '%' '='); +push_punct!(push_fat_arrow '=' '>'); +push_punct!(push_semi ';'); +push_punct!(push_shl '<' '<'); +push_punct!(push_shl_eq '<' '<' '='); +push_punct!(push_shr '>' '>'); +push_punct!(push_shr_eq '>' '>' '='); +push_punct!(push_star '*'); +push_punct!(push_sub '-'); +push_punct!(push_sub_eq '-' '='); + +// Helper method for constructing identifiers from the `format_ident!` macro, +// handling `r#` prefixes. +// +// Directly parsing the input string may produce a valid identifier, +// although the input string was invalid, due to ignored characters such as +// whitespace and comments. Instead, we always create a non-raw identifier +// to validate that the string is OK, and only parse again if needed. +// +// The `is_ident` method defined above is insufficient for validation, as it +// will reject non-ASCII identifiers. +pub fn mk_ident(id: &str, span: Option) -> Ident { + let span = span.unwrap_or_else(Span::call_site); + + let is_raw = id.starts_with("r#"); + let unraw = Ident::new(if is_raw { &id[2..] } else { id }, span); + if !is_raw { + return unraw; + } + + // At this point, the identifier is raw, and the unraw-ed version of it was + // successfully converted into an identifier. Try to produce a valid raw + // identifier by running the `TokenStream` parser, and unwrapping the first + // token as an `Ident`. + // + // FIXME: When `Ident::new_raw` becomes stable, this method should be + // updated to call it when available. 
+ match id.parse::() { + Ok(ts) => { + let mut iter = ts.into_iter(); + match (iter.next(), iter.next()) { + (Some(TokenTree::Ident(mut id)), None) => { + id.set_span(span); + id + } + _ => unreachable!("valid raw ident fails to parse"), + } + } + Err(_) => unreachable!("valid raw ident fails to parse"), + } +} + +// Adapts from `IdentFragment` to `fmt::Display` for use by the `format_ident!` +// macro, and exposes span information from these fragments. +// +// This struct also has forwarding implementations of the formatting traits +// `Octal`, `LowerHex`, `UpperHex`, and `Binary` to allow for their use within +// `format_ident!`. +#[derive(Copy, Clone)] +pub struct IdentFragmentAdapter(pub T); + +impl IdentFragmentAdapter { + pub fn span(&self) -> Option { + self.0.span() + } +} + +impl fmt::Display for IdentFragmentAdapter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + IdentFragment::fmt(&self.0, f) + } +} + +impl fmt::Octal for IdentFragmentAdapter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Octal::fmt(&self.0, f) + } +} + +impl fmt::LowerHex for IdentFragmentAdapter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::LowerHex::fmt(&self.0, f) + } +} + +impl fmt::UpperHex for IdentFragmentAdapter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::UpperHex::fmt(&self.0, f) + } +} + +impl fmt::Binary for IdentFragmentAdapter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Binary::fmt(&self.0, f) + } +} --- /dev/null +++ b/vendor/quote/src/spanned.rs @@ -0,0 +1,42 @@ +use crate::ToTokens; +use proc_macro2::{Span, TokenStream}; + +pub trait Spanned { + fn __span(&self) -> Span; +} + +impl Spanned for Span { + fn __span(&self) -> Span { + *self + } +} + +impl Spanned for T { + fn __span(&self) -> Span { + join_spans(self.into_token_stream()) + } +} + +fn join_spans(tokens: TokenStream) -> Span { + let mut iter = tokens.into_iter().filter_map(|tt| { + // FIXME: This shouldn't be required, since optimally spans should + // never be invalid. This filter_map can probably be removed when + // https://github.com/rust-lang/rust/issues/43081 is resolved. + let span = tt.span(); + let debug = format!("{:?}", span); + if debug.ends_with("bytes(0..0)") { + None + } else { + Some(span) + } + }); + + let first = match iter.next() { + Some(span) => span, + None => return Span::call_site(), + }; + + iter.fold(None, |_prev, next| Some(next)) + .and_then(|last| first.join(last)) + .unwrap_or(first) +} --- a/vendor/quote/src/to_tokens.rs +++ b/vendor/quote/src/to_tokens.rs @@ -2,10 +2,11 @@ use super::TokenStreamExt; use std::borrow::Cow; use std::iter; +use std::rc::Rc; use proc_macro2::{Group, Ident, Literal, Punct, Span, TokenStream, TokenTree}; -/// Types that can be interpolated inside a [`quote!`] invocation. +/// Types that can be interpolated inside a `quote!` invocation. 
/// /// [`quote!`]: macro.quote.html pub trait ToTokens { @@ -22,11 +23,8 @@ pub trait ToTokens { /// `std::cmp::PartialEq`: /// /// ``` - /// extern crate quote; - /// use quote::{TokenStreamExt, ToTokens}; - /// - /// extern crate proc_macro2; /// use proc_macro2::{TokenTree, Spacing, Span, Punct, TokenStream}; + /// use quote::{TokenStreamExt, ToTokens}; /// /// pub struct Path { /// pub global: bool, @@ -53,8 +51,6 @@ pub trait ToTokens { /// # unimplemented!() /// # } /// # } - /// # - /// # fn main() {} /// ``` fn to_tokens(&self, tokens: &mut TokenStream); @@ -62,13 +58,21 @@ pub trait ToTokens { /// /// This method is implicitly implemented using `to_tokens`, and acts as a /// convenience method for consumers of the `ToTokens` trait. + fn to_token_stream(&self) -> TokenStream { + let mut tokens = TokenStream::new(); + self.to_tokens(&mut tokens); + tokens + } + + /// Convert `self` directly into a `TokenStream` object. + /// + /// This method is implicitly implemented using `to_tokens`, and acts as a + /// convenience method for consumers of the `ToTokens` trait. fn into_token_stream(self) -> TokenStream where Self: Sized, { - let mut tokens = TokenStream::new(); - self.to_tokens(&mut tokens); - tokens + self.to_token_stream() } } @@ -96,6 +100,12 @@ impl ToTokens for } } +impl ToTokens for Rc { + fn to_tokens(&self, tokens: &mut TokenStream) { + (**self).to_tokens(tokens); + } +} + impl ToTokens for Option { fn to_tokens(&self, tokens: &mut TokenStream) { if let Some(ref t) = *self { @@ -131,12 +141,14 @@ primitive! { i16 => i16_suffixed i32 => i32_suffixed i64 => i64_suffixed + i128 => i128_suffixed isize => isize_suffixed u8 => u8_suffixed u16 => u16_suffixed u32 => u32_suffixed u64 => u64_suffixed + u128 => u128_suffixed usize => usize_suffixed f32 => f32_suffixed --- /dev/null +++ b/vendor/quote/tests/compiletest.rs @@ -0,0 +1,6 @@ +#[rustversion::attr(not(nightly), ignore)] +#[test] +fn ui() { + let t = trybuild::TestCases::new(); + t.compile_fail("tests/ui/*.rs"); +} --- a/vendor/quote/tests/test.rs +++ b/vendor/quote/tests/test.rs @@ -1,13 +1,10 @@ #![cfg_attr(feature = "cargo-clippy", allow(blacklisted_name))] use std::borrow::Cow; - -extern crate proc_macro2; -#[macro_use] -extern crate quote; +use std::collections::BTreeSet; use proc_macro2::{Ident, Span, TokenStream}; -use quote::TokenStreamExt; +use quote::{format_ident, quote, TokenStreamExt}; struct X; @@ -120,18 +117,20 @@ fn test_integer() { let ii16 = -1i16; let ii32 = -1i32; let ii64 = -1i64; + let ii128 = -1i128; let iisize = -1isize; let uu8 = 1u8; let uu16 = 1u16; let uu32 = 1u32; let uu64 = 1u64; + let uu128 = 1u128; let uusize = 1usize; let tokens = quote! { - #ii8 #ii16 #ii32 #ii64 #iisize - #uu8 #uu16 #uu32 #uu64 #uusize + #ii8 #ii16 #ii32 #ii64 #ii128 #iisize + #uu8 #uu16 #uu32 #uu64 #uu128 #uusize }; - let expected = "-1i8 -1i16 -1i32 -1i64 -1isize 1u8 1u16 1u32 1u64 1usize"; + let expected = "-1i8 -1i16 -1i32 -1i64 -1i128 -1isize 1u8 1u16 1u32 1u64 1u128 1usize"; assert_eq!(expected, tokens.to_string()); } @@ -161,7 +160,7 @@ fn test_char() { let tokens = quote! 
{ #zero #pound #quote #apost #newline #heart }; - let expected = "'\\u{0}' '#' '\\\"' '\\'' '\\n' '\\u{2764}'"; + let expected = "'\\u{0}' '#' '\"' '\\'' '\\n' '\\u{2764}'"; assert_eq!(expected, tokens.to_string()); } @@ -169,7 +168,7 @@ fn test_char() { fn test_str() { let s = "\0 a 'b \" c"; let tokens = quote!(#s); - let expected = "\"\\u{0} a \\'b \\\" c\""; + let expected = "\"\\u{0} a 'b \\\" c\""; assert_eq!(expected, tokens.to_string()); } @@ -177,7 +176,7 @@ fn test_str() { fn test_string() { let s = "\0 a 'b \" c".to_string(); let tokens = quote!(#s); - let expected = "\"\\u{0} a \\'b \\\" c\""; + let expected = "\"\\u{0} a 'b \\\" c\""; assert_eq!(expected, tokens.to_string()); } @@ -228,9 +227,42 @@ fn test_nested_fancy_repetition() { } #[test] -fn test_empty_repetition() { - let tokens = quote!(#(a b)* #(c d),*); - assert_eq!("", tokens.to_string()); +fn test_duplicate_name_repetition() { + let foo = &["a", "b"]; + + let tokens = quote! { + #(#foo: #foo),* + #(#foo: #foo),* + }; + + let expected = r#""a" : "a" , "b" : "b" "a" : "a" , "b" : "b""#; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_duplicate_name_repetition_no_copy() { + let foo = vec!["a".to_owned(), "b".to_owned()]; + + let tokens = quote! { + #(#foo: #foo),* + }; + + let expected = r#""a" : "a" , "b" : "b""#; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_btreeset_repetition() { + let mut set = BTreeSet::new(); + set.insert("a".to_owned()); + set.insert("b".to_owned()); + + let tokens = quote! { + #(#set: #set),* + }; + + let expected = r#""a" : "a" , "b" : "b""#; + assert_eq!(expected, tokens.to_string()); } #[test] @@ -244,6 +276,19 @@ fn test_variable_name_conflict() { } #[test] +fn test_nonrep_in_repetition() { + let rep = vec!["a", "b"]; + let nonrep = "c"; + + let tokens = quote! { + #(#rep #rep : #nonrep #nonrep),* + }; + + let expected = r#""a" "a" : "c" "c" , "b" "b" : "c" "c""#; + assert_eq!(expected, tokens.to_string()); +} + +#[test] fn test_empty_quote() { let tokens = quote!(); assert_eq!("", tokens.to_string()); @@ -270,7 +315,7 @@ fn test_cow() { #[test] fn test_closure() { fn field_i(i: usize) -> Ident { - Ident::new(&format!("__field{}", i), Span::call_site()) + format_ident!("__field{}", i) } let fields = (0usize..3) @@ -288,3 +333,97 @@ fn test_append_tokens() { a.append_all(b); assert_eq!("a b", a.to_string()); } + +#[test] +fn test_format_ident() { + let id0 = format_ident!("Aa"); + let id1 = format_ident!("Hello{x}", x = id0); + let id2 = format_ident!("Hello{x}", x = 5usize); + let id3 = format_ident!("Hello{}_{x}", id0, x = 10usize); + let id4 = format_ident!("Aa", span = Span::call_site()); + + assert_eq!(id0, "Aa"); + assert_eq!(id1, "HelloAa"); + assert_eq!(id2, "Hello5"); + assert_eq!(id3, "HelloAa_10"); + assert_eq!(id4, "Aa"); +} + +#[test] +fn test_format_ident_strip_raw() { + let id = format_ident!("r#struct"); + let my_id = format_ident!("MyId{}", id); + let raw_my_id = format_ident!("r#MyId{}", id); + + assert_eq!(id, "r#struct"); + assert_eq!(my_id, "MyIdstruct"); + assert_eq!(raw_my_id, "r#MyIdstruct"); +} + +#[test] +fn test_outer_line_comment() { + let tokens = quote! { + /// doc + }; + let expected = "# [ doc = r\" doc\" ]"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_inner_line_comment() { + let tokens = quote! { + //! doc + }; + let expected = "# ! [ doc = r\" doc\" ]"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_outer_block_comment() { + let tokens = quote! 
{ + /** doc */ + }; + let expected = "# [ doc = r\" doc \" ]"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_inner_block_comment() { + let tokens = quote! { + /*! doc */ + }; + let expected = "# ! [ doc = r\" doc \" ]"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_outer_attr() { + let tokens = quote! { + #[inline] + }; + let expected = "# [ inline ]"; + assert_eq!(expected, tokens.to_string()); +} + +#[test] +fn test_inner_attr() { + let tokens = quote! { + #![no_std] + }; + let expected = "# ! [ no_std ]"; + assert_eq!(expected, tokens.to_string()); +} + +// https://github.com/dtolnay/quote/issues/130 +#[test] +fn test_star_after_repetition() { + let c = vec!['0', '1']; + let tokens = quote! { + #( + f(#c); + )* + *out = None; + }; + let expected = "f ( '0' ) ; f ( '1' ) ; * out = None ;"; + assert_eq!(expected, tokens.to_string()); +} --- /dev/null +++ b/vendor/quote/tests/ui/does-not-have-iter-interpolated-dup.rs @@ -0,0 +1,9 @@ +use quote::quote; + +fn main() { + let nonrep = ""; + + // Without some protection against repetitions with no iterator somewhere + // inside, this would loop infinitely. + quote!(#(#nonrep #nonrep)*); +} --- /dev/null +++ b/vendor/quote/tests/ui/does-not-have-iter-interpolated.rs @@ -0,0 +1,9 @@ +use quote::quote; + +fn main() { + let nonrep = ""; + + // Without some protection against repetitions with no iterator somewhere + // inside, this would loop infinitely. + quote!(#(#nonrep)*); +} --- /dev/null +++ b/vendor/quote/tests/ui/does-not-have-iter-separated.rs @@ -0,0 +1,5 @@ +use quote::quote; + +fn main() { + quote!(#(a b),*); +} --- /dev/null +++ b/vendor/quote/tests/ui/does-not-have-iter.rs @@ -0,0 +1,5 @@ +use quote::quote; + +fn main() { + quote!(#(a b)*); +} --- /dev/null +++ b/vendor/quote/tests/ui/not-quotable.rs @@ -0,0 +1,7 @@ +use quote::quote; +use std::net::Ipv4Addr; + +fn main() { + let ip = Ipv4Addr::LOCALHOST; + let _ = quote! { #ip }; +} --- /dev/null +++ b/vendor/quote/tests/ui/not-repeatable.rs @@ -0,0 +1,7 @@ +use quote::quote; +use std::net::Ipv4Addr; + +fn main() { + let ip = Ipv4Addr::LOCALHOST; + let _ = quote! 
{ #(#ip)* }; +} --- /dev/null +++ b/vendor/quote/tests/ui/wrong-type-span.rs @@ -0,0 +1,7 @@ +use quote::quote_spanned; + +fn main() { + let span = ""; + let x = 0; + quote_spanned!(span=> #x); +} --- /dev/null +++ b/vendor/syn-0.15.7/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"cc823b5150d40948fb45042c1987dd8ede59ed6aa64003f622c4b901e319a218","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"f033c371492a4769d377a8bf1a139adc7bf94ea00595b867a3e234eeab994c8c","README.md":"9ca76bd182b81395755fbf0a4f1af086a46a90848f45b75210517004e751afe2","src/attr.rs":"ac161a3011bcd3381eb143e0954851fcff3e82e47075e6c9f6191ccf8ee67fee","src/buffer.rs":"a82b47bb12ec0de4159a7677712db5f4f54c145eb61aa88a1696d32cf143d50e","src/data.rs":"c34df5d623fed7a52be01fc625a4502f6ad97dc8feb25c51f7d57029dbdd91dd","src/derive.rs":"7a2246e8ee03fcbea040363d0b943daac696667ba799218231307c1b7a96aeed","src/error.rs":"93a265306ee6d265feeccb65b64e429ec6b4bb29d825cb52a319ea86e5cc1c11","src/export.rs":"39cc2468a141fb8229c9189dfe99875c278850714b1996e683a5b4cbc8aa3457","src/expr.rs":"91bab694502cebc56bdcd45219f1cf317ff857320d855e595ec2acc0f9ab781a","src/ext.rs":"4902ffc7dc25a1bb5813d5292a3df7cbf72ebad79def578c7cd231cf67e1785c","src/file.rs":"ebd350b5ff548cdbb21a0fadd5572575a216a1b0caef36dd46477ca324c7af6f","src/gen/fold.rs":"bfca5243b4694cc6a9b0d1f34ca53fa90387325fd4ee6bce024adb3ca42f4472","src/gen/visit.rs":"4d13b239db7c38a38f8ce5dfe827317ec4d35df83dd65ad5a350a3c882632bfd","src/gen/visit_mut.rs":"f5c8aea5907084c2425cdb613a07def41735c764022f7549876c9fa4c7170d5a","src/gen_helper.rs":"d128fbd24fadfc5634976bdb9188c649f9905718c9c987a2839c3e6134b155a2","src/generics.rs":"3b07bcfa970714c38c2b524da765e5b0465832d91aba9ed40b99b4aa7a7ea9c4","src/group.rs":"7faa0b3701b6e597a5766e316733cd4d88ecc2a24b54b233f33e28c23a7cbad8","src/ident.rs":"61534c48949ebfa03e948874ef64174e1c8111c3b953edd58f79748fe9e00507","src/item.rs":"897cfd8ea6f2ff1a664e2a5db84073f5ed1480318d14236c636c94608016b27c","src/keyword.rs":"0a8fd45d065c56532065f97fb097136b6f1a8efc393a0946e6a95217877616a9","src/lib.rs":"79664eb2d3570c2851c0d6e5dde4e9764619e14c5f107ff07d1416d2a15f8c1a","src/lifetime.rs":"3174a81cea3eef0ec1555900b96b1641d6d3ed3609bc17d72b02a1495377ac35","src/lit.rs":"661bf3ad4b49bc74dc808c1f1d584551689145d3c5fbadfcc28d157d5087981b","src/lookahead.rs":"07ce6d6915f24a01f86a486135eb841a3a01424148fe49ea797c5ffacf0c7673","src/mac.rs":"8a7efbdc9498612844c88660312117c401685bf300da7e91bef7effde3026019","src/macros.rs":"03d33128d9c2d2664cc2d3d158518c60cadad8b340945c6eb292fb7bd92e1b41","src/op.rs":"83bbe2199946abbf1d4a0bc6eb0d97285439f77e8e02646b8e3b707485e4301e","src/parse.rs":"248cfe3d826cf192efd5fef1b52db5075d3739e045f42157908596fc039a741b","src/parse_quote.rs":"e6f8101568d8430d6793183dfedfee01c2c479274ff7318dd221060ac140a477","src/path.rs":"e666c702d46e2849cdc37fddc2e1950659cd17611ebf988102f2bf0af72b6bd1","src/print.rs":"7ebb68123898f2ebbae12abf028747c05bea7b08f1e96b17164f1dcebdab7355","src/punctuated.rs":"01539dcb51c75e0fe0a4cdfb7716a909ce1bfd0767370c04043159a0a0dec154","src/span.rs":"748c51c6feb223c26d3b1701f5bb98aee823666c775c98106cfa24fe29d8cec1","src/spanned.rs":"0d9bdef967d339deae5e2229f9593f48b15af67cf1f79358aa464cacd173f32c","src/token.rs":"40c406da738c52e52944585acc5ff36b75edb905b78cfb2bd74626663edb2c99","src/tt.rs":"6ff2559d5c5fcaa73e914cd0a4a5984ab21de7ea334f1c1498e73059d2d1f7d1","src/ty.rs":"503e0ae7da33ecd6d733aa3d14a45ced20696b9bdd52d3f9ef23fd31ec5651da"},"package":"455a6ec9b368f8c479b0ae5494d13b22dc00990d2f0
0d68c9dc6a2dc4f17f210"} --- /dev/null +++ b/vendor/syn-0.15.7/Cargo.toml @@ -0,0 +1,70 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "syn" +version = "0.15.7" +authors = ["David Tolnay "] +include = ["/Cargo.toml", "/src/**/*.rs", "/README.md", "/LICENSE-APACHE", "/LICENSE-MIT"] +description = "Parser for Rust source code" +documentation = "https://docs.rs/syn" +readme = "README.md" +categories = ["development-tools::procedural-macro-helpers"] +license = "MIT/Apache-2.0" +repository = "https://github.com/dtolnay/syn" +[package.metadata.docs.rs] +all-features = true + +[package.metadata.playground] +all-features = true + +[lib] +name = "syn" + +[[example]] +name = "dump-syntax" +path = "examples/dump-syntax/main.rs" +required-features = ["full", "parsing", "extra-traits"] +[dependencies.proc-macro2] +version = "0.4.4" +default-features = false + +[dependencies.quote] +version = "0.6" +optional = true +default-features = false + +[dependencies.unicode-xid] +version = "0.1" +[dev-dependencies.rayon] +version = "1.0" + +[dev-dependencies.regex] +version = "1.0" + +[dev-dependencies.walkdir] +version = "2.1" + +[features] +clone-impls = [] +default = ["derive", "parsing", "printing", "clone-impls", "proc-macro"] +derive = [] +extra-traits = [] +fold = [] +full = [] +parsing = [] +printing = ["quote"] +proc-macro = ["proc-macro2/proc-macro", "quote/proc-macro"] +visit = [] +visit-mut = [] +[badges.travis-ci] +repository = "dtolnay/syn" --- /dev/null +++ b/vendor/syn-0.15.7/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. --- /dev/null +++ b/vendor/syn-0.15.7/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2018 Syn Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. --- /dev/null +++ b/vendor/syn-0.15.7/README.md @@ -0,0 +1,281 @@ +Parser for Rust source code +=========================== + +[![Build Status](https://api.travis-ci.org/dtolnay/syn.svg?branch=master)](https://travis-ci.org/dtolnay/syn) +[![Latest Version](https://img.shields.io/crates/v/syn.svg)](https://crates.io/crates/syn) +[![Rust Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/syn/0.15/syn/) +[![Rustc Version 1.15+](https://img.shields.io/badge/rustc-1.15+-lightgray.svg)](https://blog.rust-lang.org/2017/02/02/Rust-1.15.html) + +Syn is a parsing library for parsing a stream of Rust tokens into a syntax tree +of Rust source code. + +Currently this library is geared toward use in Rust procedural macros, but +contains some APIs that may be useful more generally. + +[custom derive]: https://github.com/rust-lang/rfcs/blob/master/text/1681-macros-1.1.md + +- **Data structures** — Syn provides a complete syntax tree that can represent + any valid Rust source code. The syntax tree is rooted at [`syn::File`] which + represents a full source file, but there are other entry points that may be + useful to procedural macros including [`syn::Item`], [`syn::Expr`] and + [`syn::Type`]. + +- **Custom derives** — Of particular interest to custom derives is + [`syn::DeriveInput`] which is any of the three legal input items to a derive + macro. An example below shows using this type in a library that can derive + implementations of a trait of your own. + +- **Parsing** — Parsing in Syn is built around [parser functions] with the + signature `fn(ParseStream) -> Result`. Every syntax tree node defined by + Syn is individually parsable and may be used as a building block for custom + syntaxes, or you may dream up your own brand new syntax without involving any + of our syntax tree types. + +- **Location information** — Every token parsed by Syn is associated with a + `Span` that tracks line and column information back to the source of that + token. These spans allow a procedural macro to display detailed error messages + pointing to all the right places in the user's code. There is an example of + this below. + +- **Feature flags** — Functionality is aggressively feature gated so your + procedural macros enable only what they need, and do not pay in compile time + for all the rest. + +[`syn::File`]: https://docs.rs/syn/0.15/syn/struct.File.html +[`syn::Item`]: https://docs.rs/syn/0.15/syn/enum.Item.html +[`syn::Expr`]: https://docs.rs/syn/0.15/syn/enum.Expr.html +[`syn::Type`]: https://docs.rs/syn/0.15/syn/enum.Type.html +[`syn::DeriveInput`]: https://docs.rs/syn/0.15/syn/struct.DeriveInput.html +[parser functions]: https://docs.rs/syn/0.15/syn/parse/index.html + +If you get stuck with anything involving procedural macros in Rust I am happy to +provide help even if the issue is not related to Syn. Please file a ticket in +this repo. + +*Version requirement: Syn supports any compiler version back to Rust's very +first support for procedural macros in Rust 1.15.0. 
Some features especially +around error reporting are only available in newer compilers or on the nightly +channel.* + +[*Release notes*](https://github.com/dtolnay/syn/releases) + +## Example of a custom derive + +The canonical custom derive using Syn looks like this. We write an ordinary Rust +function tagged with a `proc_macro_derive` attribute and the name of the trait +we are deriving. Any time that derive appears in the user's code, the Rust +compiler passes their data structure as tokens into our macro. We get to execute +arbitrary Rust code to figure out what to do with those tokens, then hand some +tokens back to the compiler to compile into the user's crate. + +[`TokenStream`]: https://doc.rust-lang.org/proc_macro/struct.TokenStream.html + +```toml +[dependencies] +syn = "0.15" +quote = "0.6" + +[lib] +proc-macro = true +``` + +```rust +extern crate proc_macro; +extern crate syn; + +#[macro_use] +extern crate quote; + +use proc_macro::TokenStream; +use syn::{parse_macro_input, DeriveInput}; + +#[proc_macro_derive(MyMacro)] +pub fn my_macro(input: TokenStream) -> TokenStream { + // Parse the input tokens into a syntax tree + let input = parse_macro_input!(input as DeriveInput); + + // Build the output, possibly using quasi-quotation + let expanded = quote! { + // ... + }; + + // Hand the output tokens back to the compiler + TokenStream::from(expanded) +} +``` + +The [`heapsize`] example directory shows a complete working Macros 1.1 +implementation of a custom derive. It works on any Rust compiler \>=1.15.0. The +example derives a `HeapSize` trait which computes an estimate of the amount of +heap memory owned by a value. + +[`heapsize`]: examples/heapsize + +```rust +pub trait HeapSize { + /// Total number of bytes of heap memory owned by `self`. + fn heap_size_of_children(&self) -> usize; +} +``` + +The custom derive allows users to write `#[derive(HeapSize)]` on data structures +in their program. + +```rust +#[derive(HeapSize)] +struct Demo<'a, T: ?Sized> { + a: Box, + b: u8, + c: &'a str, + d: String, +} +``` + +## Spans and error reporting + +The [`heapsize2`] example directory is an extension of the `heapsize` example +that demonstrates some of the hygiene and error reporting properties of Macros +2.0. This example currently requires a nightly Rust compiler \>=1.24.0-nightly +but we are working to stabilize all of the APIs involved. + +[`heapsize2`]: examples/heapsize2 + +The token-based procedural macro API provides great control over where the +compiler's error messages are displayed in user code. Consider the error the +user sees if one of their field types does not implement `HeapSize`. + +```rust +#[derive(HeapSize)] +struct Broken { + ok: String, + bad: std::thread::Thread, +} +``` + +In the Macros 1.1 string-based procedural macro world, the resulting error would +point unhelpfully to the invocation of the derive macro and not to the actual +problematic field. + +``` +error[E0599]: no method named `heap_size_of_children` found for type `std::thread::Thread` in the current scope + --> src/main.rs:4:10 + | +4 | #[derive(HeapSize)] + | ^^^^^^^^ +``` + +By tracking span information all the way through the expansion of a procedural +macro as shown in the `heapsize2` example, token-based macros in Syn are able to +trigger errors that directly pinpoint the source of the problem. 
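The essential step is generating each per-field expression with the span of the
corresponding field. A minimal sketch in the style of the `heapsize2` example
(the helper name `heap_size_sum` and the `::heapsize` paths here are
illustrative, not necessarily the exact code in that directory):

```rust
extern crate proc_macro2;
extern crate syn;
#[macro_use]
extern crate quote;

use proc_macro2::TokenStream;
use syn::spanned::Spanned;
use syn::{Data, Fields};

// Expands to `0 + <expr> + <expr> + ...` where each `<expr>` carries the
// span of its field, so a missing `HeapSize` impl is reported on the
// offending field rather than on the `#[derive(HeapSize)]` attribute.
fn heap_size_sum(data: &Data) -> TokenStream {
    match *data {
        Data::Struct(ref data) => match data.fields {
            Fields::Named(ref fields) => {
                let recurse = fields.named.iter().map(|f| {
                    let name = &f.ident;
                    quote_spanned! {f.span()=>
                        ::heapsize::HeapSize::heap_size_of_children(&self.#name)
                    }
                });
                quote!(0 #(+ #recurse)*)
            }
            _ => unimplemented!(),
        },
        _ => unimplemented!(),
    }
}
```

With the spans forwarded this way, the same broken input produces: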
+ +``` +error[E0277]: the trait bound `std::thread::Thread: HeapSize` is not satisfied + --> src/main.rs:7:5 + | +7 | bad: std::thread::Thread, + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `HeapSize` is not implemented for `std::thread::Thread` +``` + +## Parsing a custom syntax + +The [`lazy-static`] example directory shows the implementation of a +`functionlike!(...)` procedural macro in which the input tokens are parsed using +Syn's parsing API. + +[`lazy-static`]: examples/lazy-static + +The example reimplements the popular `lazy_static` crate from crates.io as a +procedural macro. + +``` +lazy_static! { + static ref USERNAME: Regex = Regex::new("^[a-z0-9_-]{3,16}$").unwrap(); +} +``` + +The implementation shows how to trigger custom warnings and error messages on +the macro input. + +``` +warning: come on, pick a more creative name + --> src/main.rs:10:16 + | +10 | static ref FOO: String = "lazy_static".to_owned(); + | ^^^ +``` + +## Debugging + +When developing a procedural macro it can be helpful to look at what the +generated code looks like. Use `cargo rustc -- -Zunstable-options +--pretty=expanded` or the [`cargo expand`] subcommand. + +[`cargo expand`]: https://github.com/dtolnay/cargo-expand + +To show the expanded code for some crate that uses your procedural macro, run +`cargo expand` from that crate. To show the expanded code for one of your own +test cases, run `cargo expand --test the_test_case` where the last argument is +the name of the test file without the `.rs` extension. + +This write-up by Brandon W Maister discusses debugging in more detail: +[Debugging Rust's new Custom Derive system][debugging]. + +[debugging]: https://quodlibetor.github.io/posts/debugging-rusts-new-custom-derive-system/ + +## Optional features + +Syn puts a lot of functionality behind optional features in order to optimize +compile time for the most common use cases. The following features are +available. + +- **`derive`** *(enabled by default)* — Data structures for representing the + possible input to a custom derive, including structs and enums and types. +- **`full`** — Data structures for representing the syntax tree of all valid + Rust source code, including items and expressions. +- **`parsing`** *(enabled by default)* — Ability to parse input tokens into a + syntax tree node of a chosen type. +- **`printing`** *(enabled by default)* — Ability to print a syntax tree node as + tokens of Rust source code. +- **`visit`** — Trait for traversing a syntax tree. +- **`visit-mut`** — Trait for traversing and mutating in place a syntax tree. +- **`fold`** — Trait for transforming an owned syntax tree. +- **`clone-impls`** *(enabled by default)* — Clone impls for all syntax tree + types. +- **`extra-traits`** — Debug, Eq, PartialEq, Hash impls for all syntax tree + types. +- **`proc-macro`** *(enabled by default)* — Runtime dependency on the dynamic + library libproc_macro from rustc toolchain. + +## Nightly features + +By default Syn uses the [`proc-macro2`] crate to emulate the nightly compiler's +procedural macro API in a stable way that works all the way back to Rust 1.15.0. +This shim makes it possible to write code without regard for whether the current +compiler version supports the features we use. + +[`proc-macro2`]: https://github.com/alexcrichton/proc-macro2 + +On a nightly compiler, to eliminate the stable shim and use the compiler's +`proc-macro` directly, add `proc-macro2` to your Cargo.toml and set its +`"nightly"` feature which bypasses the stable shim. 
+ +```toml +[dependencies] +syn = "0.15" +proc-macro2 = { version = "0.4", features = ["nightly"] } +``` + +## License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in this crate by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. --- /dev/null +++ b/vendor/syn-0.15.7/src/attr.rs @@ -0,0 +1,550 @@ +// Copyright 2018 Syn Developers +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::*; +use punctuated::Punctuated; + +use std::iter; + +use proc_macro2::{Delimiter, Spacing, TokenStream, TokenTree}; + +#[cfg(feature = "parsing")] +use parse::{ParseStream, Result}; +#[cfg(feature = "extra-traits")] +use std::hash::{Hash, Hasher}; +#[cfg(feature = "extra-traits")] +use tt::TokenStreamHelper; + +ast_struct! { + /// An attribute like `#[repr(transparent)]`. + /// + /// *This type is available if Syn is built with the `"derive"` or `"full"` + /// feature.* + /// + /// # Syntax + /// + /// Rust has six types of attributes. + /// + /// - Outer attributes like `#[repr(transparent)]`. These appear outside or + /// in front of the item they describe. + /// - Inner attributes like `#![feature(proc_macro)]`. These appear inside + /// of the item they describe, usually a module. + /// - Outer doc comments like `/// # Example`. + /// - Inner doc comments like `//! Please file an issue`. + /// - Outer block comments `/** # Example */`. + /// - Inner block comments `/*! Please file an issue */`. + /// + /// The `style` field of type `AttrStyle` distinguishes whether an attribute + /// is outer or inner. Doc comments and block comments are promoted to + /// attributes, as this is how they are processed by the compiler and by + /// `macro_rules!` macros. + /// + /// The `path` field gives the possibly colon-delimited path against which + /// the attribute is resolved. It is equal to `"doc"` for desugared doc + /// comments. The `tts` field contains the rest of the attribute body as + /// tokens. + /// + /// ```text + /// #[derive(Copy)] #[crate::precondition x < 5] + /// ^^^^^^~~~~~~ ^^^^^^^^^^^^^^^^^^^ ~~~~~ + /// path tts path tts + /// ``` + /// + /// Use the [`interpret_meta`] method to try parsing the tokens of an + /// attribute into the structured representation that is used by convention + /// across most Rust libraries. + /// + /// [`interpret_meta`]: #method.interpret_meta + /// + /// # Parsing + /// + /// This type does not implement the [`Parse`] trait and thus cannot be + /// parsed directly by [`ParseStream::parse`]. Instead use + /// [`ParseStream::call`] with one of the two parser functions + /// [`Attribute::parse_outer`] or [`Attribute::parse_inner`] depending on + /// which you intend to parse. 
+ /// + /// [`Parse`]: parse/trait.Parse.html + /// [`ParseStream::parse`]: parse/struct.ParseBuffer.html#method.parse + /// [`ParseStream::call`]: parse/struct.ParseBuffer.html#method.call + /// [`Attribute::parse_outer`]: #method.parse_outer + /// [`Attribute::parse_inner`]: #method.parse_inner + /// + /// ``` + /// #[macro_use] + /// extern crate syn; + /// + /// use syn::{Attribute, Ident}; + /// use syn::parse::{Parse, ParseStream, Result}; + /// + /// // Parses a unit struct with attributes. + /// // + /// // #[path = "s.tmpl"] + /// // struct S; + /// struct UnitStruct { + /// attrs: Vec, + /// struct_token: Token![struct], + /// name: Ident, + /// semi_token: Token![;], + /// } + /// + /// impl Parse for UnitStruct { + /// fn parse(input: ParseStream) -> Result { + /// Ok(UnitStruct { + /// attrs: input.call(Attribute::parse_outer)?, + /// struct_token: input.parse()?, + /// name: input.parse()?, + /// semi_token: input.parse()?, + /// }) + /// } + /// } + /// # + /// # fn main() {} + /// ``` + pub struct Attribute #manual_extra_traits { + pub pound_token: Token![#], + pub style: AttrStyle, + pub bracket_token: token::Bracket, + pub path: Path, + pub tts: TokenStream, + } +} + +#[cfg(feature = "extra-traits")] +impl Eq for Attribute {} + +#[cfg(feature = "extra-traits")] +impl PartialEq for Attribute { + fn eq(&self, other: &Self) -> bool { + self.style == other.style + && self.pound_token == other.pound_token + && self.bracket_token == other.bracket_token + && self.path == other.path + && TokenStreamHelper(&self.tts) == TokenStreamHelper(&other.tts) + } +} + +#[cfg(feature = "extra-traits")] +impl Hash for Attribute { + fn hash(&self, state: &mut H) + where + H: Hasher, + { + self.style.hash(state); + self.pound_token.hash(state); + self.bracket_token.hash(state); + self.path.hash(state); + TokenStreamHelper(&self.tts).hash(state); + } +} + +impl Attribute { + /// Parses the tokens after the path as a [`Meta`](enum.Meta.html) if + /// possible. + pub fn interpret_meta(&self) -> Option { + let name = if self.path.segments.len() == 1 { + &self.path.segments.first().unwrap().value().ident + } else { + return None; + }; + + if self.tts.is_empty() { + return Some(Meta::Word(name.clone())); + } + + let tts = self.tts.clone().into_iter().collect::>(); + + if tts.len() == 1 { + if let Some(meta) = Attribute::extract_meta_list(name.clone(), &tts[0]) { + return Some(meta); + } + } + + if tts.len() == 2 { + if let Some(meta) = Attribute::extract_name_value(name.clone(), &tts[0], &tts[1]) { + return Some(meta); + } + } + + None + } + + /// Parses zero or more outer attributes from the stream. + /// + /// *This function is available if Syn is built with the `"parsing"` + /// feature.* + #[cfg(feature = "parsing")] + pub fn parse_outer(input: ParseStream) -> Result> { + let mut attrs = Vec::new(); + while input.peek(Token![#]) { + attrs.push(input.call(parsing::single_parse_outer)?); + } + Ok(attrs) + } + + /// Parses zero or more inner attributes from the stream. 
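    ///
    /// Note that the scan stops at the first token pair that is not `#!`:
    /// a bare `#` begins an outer attribute, which this parser leaves in
    /// the stream for [`parse_outer`](#method.parse_outer).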
+ /// + /// *This function is available if Syn is built with the `"parsing"` + /// feature.* + #[cfg(feature = "parsing")] + pub fn parse_inner(input: ParseStream) -> Result> { + let mut attrs = Vec::new(); + while input.peek(Token![#]) && input.peek2(Token![!]) { + attrs.push(input.call(parsing::single_parse_inner)?); + } + Ok(attrs) + } + + fn extract_meta_list(ident: Ident, tt: &TokenTree) -> Option { + let g = match *tt { + TokenTree::Group(ref g) => g, + _ => return None, + }; + if g.delimiter() != Delimiter::Parenthesis { + return None; + } + let tokens = g.stream().clone().into_iter().collect::>(); + let nested = match list_of_nested_meta_items_from_tokens(&tokens) { + Some(n) => n, + None => return None, + }; + Some(Meta::List(MetaList { + paren_token: token::Paren(g.span()), + ident: ident, + nested: nested, + })) + } + + fn extract_name_value(ident: Ident, a: &TokenTree, b: &TokenTree) -> Option { + let a = match *a { + TokenTree::Punct(ref o) => o, + _ => return None, + }; + if a.spacing() != Spacing::Alone { + return None; + } + if a.as_char() != '=' { + return None; + } + + match *b { + TokenTree::Literal(ref l) if !l.to_string().starts_with('/') => { + Some(Meta::NameValue(MetaNameValue { + ident: ident, + eq_token: Token![=]([a.span()]), + lit: Lit::new(l.clone()), + })) + } + TokenTree::Ident(ref v) => match &v.to_string()[..] { + v @ "true" | v @ "false" => Some(Meta::NameValue(MetaNameValue { + ident: ident, + eq_token: Token![=]([a.span()]), + lit: Lit::Bool(LitBool { + value: v == "true", + span: b.span(), + }), + })), + _ => None, + }, + _ => None, + } + } +} + +fn nested_meta_item_from_tokens(tts: &[TokenTree]) -> Option<(NestedMeta, &[TokenTree])> { + assert!(!tts.is_empty()); + + match tts[0] { + TokenTree::Literal(ref lit) => { + if lit.to_string().starts_with('/') { + None + } else { + let lit = Lit::new(lit.clone()); + Some((NestedMeta::Literal(lit), &tts[1..])) + } + } + + TokenTree::Ident(ref ident) => { + if tts.len() >= 3 { + if let Some(meta) = Attribute::extract_name_value(ident.clone(), &tts[1], &tts[2]) { + return Some((NestedMeta::Meta(meta), &tts[3..])); + } + } + + if tts.len() >= 2 { + if let Some(meta) = Attribute::extract_meta_list(ident.clone(), &tts[1]) { + return Some((NestedMeta::Meta(meta), &tts[2..])); + } + } + + let nested_meta = if ident == "true" || ident == "false" { + NestedMeta::Literal(Lit::Bool(LitBool { + value: ident == "true", + span: ident.span(), + })) + } else { + NestedMeta::Meta(Meta::Word(ident.clone())) + }; + Some((nested_meta, &tts[1..])) + } + + _ => None, + } +} + +fn list_of_nested_meta_items_from_tokens( + mut tts: &[TokenTree], +) -> Option> { + let mut nested_meta_items = Punctuated::new(); + let mut first = true; + + while !tts.is_empty() { + let prev_comma = if first { + first = false; + None + } else if let TokenTree::Punct(ref op) = tts[0] { + if op.spacing() != Spacing::Alone { + return None; + } + if op.as_char() != ',' { + return None; + } + let tok = Token![,]([op.span()]); + tts = &tts[1..]; + if tts.is_empty() { + break; + } + Some(tok) + } else { + return None; + }; + let (nested, rest) = match nested_meta_item_from_tokens(tts) { + Some(pair) => pair, + None => return None, + }; + if let Some(comma) = prev_comma { + nested_meta_items.push_punct(comma); + } + nested_meta_items.push_value(nested); + tts = rest; + } + + Some(nested_meta_items) +} + +ast_enum! { + /// Distinguishes between attributes that decorate an item and attributes + /// that are contained within an item. 
+ /// + /// *This type is available if Syn is built with the `"derive"` or `"full"` + /// feature.* + /// + /// # Outer attributes + /// + /// - `#[repr(transparent)]` + /// - `/// # Example` + /// - `/** Please file an issue */` + /// + /// # Inner attributes + /// + /// - `#![feature(proc_macro)]` + /// - `//! # Example` + /// - `/*! Please file an issue */` + #[cfg_attr(feature = "clone-impls", derive(Copy))] + pub enum AttrStyle { + Outer, + Inner(Token![!]), + } +} + +ast_enum_of_structs! { + /// Content of a compile-time structured attribute. + /// + /// *This type is available if Syn is built with the `"derive"` or `"full"` + /// feature.* + /// + /// ## Word + /// + /// A meta word is like the `test` in `#[test]`. + /// + /// ## List + /// + /// A meta list is like the `derive(Copy)` in `#[derive(Copy)]`. + /// + /// ## NameValue + /// + /// A name-value meta is like the `path = "..."` in `#[path = + /// "sys/windows.rs"]`. + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. + /// + /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums + pub enum Meta { + pub Word(Ident), + /// A structured list within an attribute, like `derive(Copy, Clone)`. + /// + /// *This type is available if Syn is built with the `"derive"` or + /// `"full"` feature.* + pub List(MetaList { + pub ident: Ident, + pub paren_token: token::Paren, + pub nested: Punctuated, + }), + /// A name-value pair within an attribute, like `feature = "nightly"`. + /// + /// *This type is available if Syn is built with the `"derive"` or + /// `"full"` feature.* + pub NameValue(MetaNameValue { + pub ident: Ident, + pub eq_token: Token![=], + pub lit: Lit, + }), + } +} + +impl Meta { + /// Returns the identifier that begins this structured meta item. + /// + /// For example this would return the `test` in `#[test]`, the `derive` in + /// `#[derive(Copy)]`, and the `path` in `#[path = "sys/windows.rs"]`. + pub fn name(&self) -> Ident { + match *self { + Meta::Word(ref meta) => meta.clone(), + Meta::List(ref meta) => meta.ident.clone(), + Meta::NameValue(ref meta) => meta.ident.clone(), + } + } +} + +ast_enum_of_structs! { + /// Element of a compile-time attribute list. + /// + /// *This type is available if Syn is built with the `"derive"` or `"full"` + /// feature.* + pub enum NestedMeta { + /// A structured meta item, like the `Copy` in `#[derive(Copy)]` which + /// would be a nested `Meta::Word`. + pub Meta(Meta), + + /// A Rust literal, like the `"new_name"` in `#[rename("new_name")]`. 
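        ///
        /// Bare `true`/`false` identifiers in an attribute list are
        /// promoted to `Lit::Bool` by `nested_meta_item_from_tokens`
        /// above, so they also surface as `NestedMeta::Literal`.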
+ pub Literal(Lit), + } +} + +pub trait FilterAttrs<'a> { + type Ret: Iterator; + + fn outer(self) -> Self::Ret; + fn inner(self) -> Self::Ret; +} + +impl<'a, T> FilterAttrs<'a> for T +where + T: IntoIterator, +{ + type Ret = iter::Filter bool>; + + fn outer(self) -> Self::Ret { + fn is_outer(attr: &&Attribute) -> bool { + match attr.style { + AttrStyle::Outer => true, + _ => false, + } + } + self.into_iter().filter(is_outer) + } + + fn inner(self) -> Self::Ret { + fn is_inner(attr: &&Attribute) -> bool { + match attr.style { + AttrStyle::Inner(_) => true, + _ => false, + } + } + self.into_iter().filter(is_inner) + } +} + +#[cfg(feature = "parsing")] +pub mod parsing { + use super::*; + + use parse::{ParseStream, Result}; + #[cfg(feature = "full")] + use private; + + pub fn single_parse_inner(input: ParseStream) -> Result { + let content; + Ok(Attribute { + pound_token: input.parse()?, + style: AttrStyle::Inner(input.parse()?), + bracket_token: bracketed!(content in input), + path: content.call(Path::parse_mod_style)?, + tts: content.parse()?, + }) + } + + pub fn single_parse_outer(input: ParseStream) -> Result { + let content; + Ok(Attribute { + pound_token: input.parse()?, + style: AttrStyle::Outer, + bracket_token: bracketed!(content in input), + path: content.call(Path::parse_mod_style)?, + tts: content.parse()?, + }) + } + + #[cfg(feature = "full")] + impl private { + pub fn attrs(outer: Vec, inner: Vec) -> Vec { + let mut attrs = outer; + attrs.extend(inner); + attrs + } + } +} + +#[cfg(feature = "printing")] +mod printing { + use super::*; + use proc_macro2::TokenStream; + use quote::ToTokens; + + impl ToTokens for Attribute { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.pound_token.to_tokens(tokens); + if let AttrStyle::Inner(ref b) = self.style { + b.to_tokens(tokens); + } + self.bracket_token.surround(tokens, |tokens| { + self.path.to_tokens(tokens); + self.tts.to_tokens(tokens); + }); + } + } + + impl ToTokens for MetaList { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.ident.to_tokens(tokens); + self.paren_token.surround(tokens, |tokens| { + self.nested.to_tokens(tokens); + }) + } + } + + impl ToTokens for MetaNameValue { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.ident.to_tokens(tokens); + self.eq_token.to_tokens(tokens); + self.lit.to_tokens(tokens); + } + } +} --- /dev/null +++ b/vendor/syn-0.15.7/src/buffer.rs @@ -0,0 +1,355 @@ +// Copyright 2018 Syn Developers +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A stably addressed token buffer supporting efficient traversal based on a +//! cheaply copyable cursor. +//! +//! *This module is available if Syn is built with the `"parsing"` feature.* + +// This module is heavily commented as it contains the only unsafe code in Syn, +// and caution should be used when editing it. The public-facing interface is +// 100% safe but the implementation is fragile internally. + +#[cfg(all( + not(all(target_arch = "wasm32", target_os = "unknown")), + feature = "proc-macro" +))] +use proc_macro as pm; +use proc_macro2::{Delimiter, Group, Ident, Literal, Punct, Spacing, Span, TokenStream, TokenTree}; + +use std::marker::PhantomData; +use std::ptr; + +use Lifetime; + +/// Internal type which is used instead of `TokenTree` to represent a token tree +/// within a `TokenBuffer`. +enum Entry { + // Mimicking types from proc-macro. 
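    //
    // `Group` owns a nested `TokenBuffer` for the tokens inside its
    // delimiters, so a cursor can descend without re-tokenizing. The nested
    // buffer's final `End` entry is wired up in `inner_new` below to point
    // at the entry *after* the group in this buffer, which is what lets a
    // cursor step back out of a group, including transparently for
    // `None`-delimited groups (see `Cursor::create`).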
+ Group(Group, TokenBuffer), + Ident(Ident), + Punct(Punct), + Literal(Literal), + // End entries contain a raw pointer to the entry from the containing + // token tree, or null if this is the outermost level. + End(*const Entry), +} + +/// A buffer that can be efficiently traversed multiple times, unlike +/// `TokenStream` which requires a deep copy in order to traverse more than +/// once. +/// +/// *This type is available if Syn is built with the `"parsing"` feature.* +pub struct TokenBuffer { + // NOTE: Do not derive clone on this - there are raw pointers inside which + // will be messed up. Moving the `TokenBuffer` itself is safe as the actual + // backing slices won't be moved. + data: Box<[Entry]>, +} + +impl TokenBuffer { + // NOTE: DO NOT MUTATE THE `Vec` RETURNED FROM THIS FUNCTION ONCE IT + // RETURNS, THE ADDRESS OF ITS BACKING MEMORY MUST REMAIN STABLE. + fn inner_new(stream: TokenStream, up: *const Entry) -> TokenBuffer { + // Build up the entries list, recording the locations of any Groups + // in the list to be processed later. + let mut entries = Vec::new(); + let mut seqs = Vec::new(); + for tt in stream { + match tt { + TokenTree::Ident(sym) => { + entries.push(Entry::Ident(sym)); + } + TokenTree::Punct(op) => { + entries.push(Entry::Punct(op)); + } + TokenTree::Literal(l) => { + entries.push(Entry::Literal(l)); + } + TokenTree::Group(g) => { + // Record the index of the interesting entry, and store an + // `End(null)` there temporarially. + seqs.push((entries.len(), g)); + entries.push(Entry::End(ptr::null())); + } + } + } + // Add an `End` entry to the end with a reference to the enclosing token + // stream which was passed in. + entries.push(Entry::End(up)); + + // NOTE: This is done to ensure that we don't accidentally modify the + // length of the backing buffer. The backing buffer must remain at a + // constant address after this point, as we are going to store a raw + // pointer into it. + let mut entries = entries.into_boxed_slice(); + for (idx, group) in seqs { + // We know that this index refers to one of the temporary + // `End(null)` entries, and we know that the last entry is + // `End(up)`, so the next index is also valid. + let seq_up = &entries[idx + 1] as *const Entry; + + // The end entry stored at the end of this Entry::Group should + // point to the Entry which follows the Group in the list. + let inner = Self::inner_new(group.stream(), seq_up); + entries[idx] = Entry::Group(group, inner); + } + + TokenBuffer { data: entries } + } + + /// Creates a `TokenBuffer` containing all the tokens from the input + /// `TokenStream`. + /// + /// *This method is available if Syn is built with both the `"parsing"` and + /// `"proc-macro"` features.* + #[cfg(all( + not(all(target_arch = "wasm32", target_os = "unknown")), + feature = "proc-macro" + ))] + pub fn new(stream: pm::TokenStream) -> TokenBuffer { + Self::new2(stream.into()) + } + + /// Creates a `TokenBuffer` containing all the tokens from the input + /// `TokenStream`. + pub fn new2(stream: TokenStream) -> TokenBuffer { + Self::inner_new(stream, ptr::null()) + } + + /// Creates a cursor referencing the first token in the buffer and able to + /// traverse until the end of the buffer. + pub fn begin(&self) -> Cursor { + unsafe { Cursor::create(&self.data[0], &self.data[self.data.len() - 1]) } + } +} + +/// A cheaply copyable cursor into a `TokenBuffer`. 
+/// +/// This cursor holds a shared reference into the immutable data which is used +/// internally to represent a `TokenStream`, and can be efficiently manipulated +/// and copied around. +/// +/// An empty `Cursor` can be created directly, or one may create a `TokenBuffer` +/// object and get a cursor to its first token with `begin()`. +/// +/// Two cursors are equal if they have the same location in the same input +/// stream, and have the same scope. +/// +/// *This type is available if Syn is built with the `"parsing"` feature.* +#[derive(Copy, Clone, Eq, PartialEq)] +pub struct Cursor<'a> { + // The current entry which the `Cursor` is pointing at. + ptr: *const Entry, + // This is the only `Entry::End(..)` object which this cursor is allowed to + // point at. All other `End` objects are skipped over in `Cursor::create`. + scope: *const Entry, + // Cursor is covariant in 'a. This field ensures that our pointers are still + // valid. + marker: PhantomData<&'a Entry>, +} + +impl<'a> Cursor<'a> { + /// Creates a cursor referencing a static empty TokenStream. + pub fn empty() -> Self { + // It's safe in this situation for us to put an `Entry` object in global + // storage, despite it not actually being safe to send across threads + // (`Ident` is a reference into a thread-local table). This is because + // this entry never includes a `Ident` object. + // + // This wrapper struct allows us to break the rules and put a `Sync` + // object in global storage. + struct UnsafeSyncEntry(Entry); + unsafe impl Sync for UnsafeSyncEntry {} + static EMPTY_ENTRY: UnsafeSyncEntry = UnsafeSyncEntry(Entry::End(0 as *const Entry)); + + Cursor { + ptr: &EMPTY_ENTRY.0, + scope: &EMPTY_ENTRY.0, + marker: PhantomData, + } + } + + /// This create method intelligently exits non-explicitly-entered + /// `None`-delimited scopes when the cursor reaches the end of them, + /// allowing for them to be treated transparently. + unsafe fn create(mut ptr: *const Entry, scope: *const Entry) -> Self { + // NOTE: If we're looking at a `End(..)`, we want to advance the cursor + // past it, unless `ptr == scope`, which means that we're at the edge of + // our cursor's scope. We should only have `ptr != scope` at the exit + // from None-delimited groups entered with `ignore_none`. + while let Entry::End(exit) = *ptr { + if ptr == scope { + break; + } + ptr = exit; + } + + Cursor { + ptr: ptr, + scope: scope, + marker: PhantomData, + } + } + + /// Get the current entry. + fn entry(self) -> &'a Entry { + unsafe { &*self.ptr } + } + + /// Bump the cursor to point at the next token after the current one. This + /// is undefined behavior if the cursor is currently looking at an + /// `Entry::End`. + unsafe fn bump(self) -> Cursor<'a> { + Cursor::create(self.ptr.offset(1), self.scope) + } + + /// If the cursor is looking at a `None`-delimited group, move it to look at + /// the first token inside instead. If the group is empty, this will move + /// the cursor past the `None`-delimited group. + /// + /// WARNING: This mutates its argument. + fn ignore_none(&mut self) { + if let Entry::Group(ref group, ref buf) = *self.entry() { + if group.delimiter() == Delimiter::None { + // NOTE: We call `Cursor::create` here to make sure that + // situations where we should immediately exit the span after + // entering it are handled correctly. + unsafe { + *self = Cursor::create(&buf.data[0], self.scope); + } + } + } + } + + /// Checks whether the cursor is currently pointing at the end of its valid + /// scope. 
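    ///
    /// Note that this is relative to the cursor's scope, not the whole
    /// buffer: a cursor obtained through [`group`](#method.group) reports
    /// eof at the closing delimiter of that group.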
+ #[inline] + pub fn eof(self) -> bool { + // We're at eof if we're at the end of our scope. + self.ptr == self.scope + } + + /// If the cursor is pointing at a `Group` with the given delimiter, returns + /// a cursor into that group and one pointing to the next `TokenTree`. + pub fn group(mut self, delim: Delimiter) -> Option<(Cursor<'a>, Span, Cursor<'a>)> { + // If we're not trying to enter a none-delimited group, we want to + // ignore them. We have to make sure to _not_ ignore them when we want + // to enter them, of course. For obvious reasons. + if delim != Delimiter::None { + self.ignore_none(); + } + + if let Entry::Group(ref group, ref buf) = *self.entry() { + if group.delimiter() == delim { + return Some((buf.begin(), group.span(), unsafe { self.bump() })); + } + } + + None + } + + /// If the cursor is pointing at a `Ident`, returns it along with a cursor + /// pointing at the next `TokenTree`. + pub fn ident(mut self) -> Option<(Ident, Cursor<'a>)> { + self.ignore_none(); + match *self.entry() { + Entry::Ident(ref ident) => Some((ident.clone(), unsafe { self.bump() })), + _ => None, + } + } + + /// If the cursor is pointing at an `Punct`, returns it along with a cursor + /// pointing at the next `TokenTree`. + pub fn punct(mut self) -> Option<(Punct, Cursor<'a>)> { + self.ignore_none(); + match *self.entry() { + Entry::Punct(ref op) if op.as_char() != '\'' => { + Some((op.clone(), unsafe { self.bump() })) + } + _ => None, + } + } + + /// If the cursor is pointing at a `Literal`, return it along with a cursor + /// pointing at the next `TokenTree`. + pub fn literal(mut self) -> Option<(Literal, Cursor<'a>)> { + self.ignore_none(); + match *self.entry() { + Entry::Literal(ref lit) => Some((lit.clone(), unsafe { self.bump() })), + _ => None, + } + } + + /// If the cursor is pointing at a `Lifetime`, returns it along with a + /// cursor pointing at the next `TokenTree`. + pub fn lifetime(mut self) -> Option<(Lifetime, Cursor<'a>)> { + self.ignore_none(); + match *self.entry() { + Entry::Punct(ref op) if op.as_char() == '\'' && op.spacing() == Spacing::Joint => { + let next = unsafe { self.bump() }; + match next.ident() { + Some((ident, rest)) => { + let lifetime = Lifetime { + apostrophe: op.span(), + ident: ident, + }; + Some((lifetime, rest)) + } + None => None, + } + } + _ => None, + } + } + + /// Copies all remaining tokens visible from this cursor into a + /// `TokenStream`. + pub fn token_stream(self) -> TokenStream { + let mut tts = Vec::new(); + let mut cursor = self; + while let Some((tt, rest)) = cursor.token_tree() { + tts.push(tt); + cursor = rest; + } + tts.into_iter().collect() + } + + /// If the cursor is pointing at a `TokenTree`, returns it along with a + /// cursor pointing at the next `TokenTree`. + /// + /// Returns `None` if the cursor has reached the end of its stream. + /// + /// This method does not treat `None`-delimited groups as transparent, and + /// will return a `Group(None, ..)` if the cursor is looking at one. + pub fn token_tree(self) -> Option<(TokenTree, Cursor<'a>)> { + let tree = match *self.entry() { + Entry::Group(ref group, _) => group.clone().into(), + Entry::Literal(ref lit) => lit.clone().into(), + Entry::Ident(ref ident) => ident.clone().into(), + Entry::Punct(ref op) => op.clone().into(), + Entry::End(..) => { + return None; + } + }; + + Some((tree, unsafe { self.bump() })) + } + + /// Returns the `Span` of the current token, or `Span::call_site()` if this + /// cursor points to eof. 
+ pub fn span(self) -> Span { + match *self.entry() { + Entry::Group(ref group, _) => group.span(), + Entry::Literal(ref l) => l.span(), + Entry::Ident(ref t) => t.span(), + Entry::Punct(ref o) => o.span(), + Entry::End(..) => Span::call_site(), + } + } +} --- /dev/null +++ b/vendor/syn-0.15.7/src/data.rs @@ -0,0 +1,392 @@ +// Copyright 2018 Syn Developers +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::*; +use punctuated::Punctuated; + +ast_struct! { + /// An enum variant. + /// + /// *This type is available if Syn is built with the `"derive"` or `"full"` + /// feature.* + pub struct Variant { + /// Attributes tagged on the variant. + pub attrs: Vec, + + /// Name of the variant. + pub ident: Ident, + + /// Content stored in the variant. + pub fields: Fields, + + /// Explicit discriminant: `Variant = 1` + pub discriminant: Option<(Token![=], Expr)>, + } +} + +ast_enum_of_structs! { + /// Data stored within an enum variant or struct. + /// + /// *This type is available if Syn is built with the `"derive"` or `"full"` + /// feature.* + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. + /// + /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums + pub enum Fields { + /// Named fields of a struct or struct variant such as `Point { x: f64, + /// y: f64 }`. + /// + /// *This type is available if Syn is built with the `"derive"` or + /// `"full"` feature.* + pub Named(FieldsNamed { + pub brace_token: token::Brace, + pub named: Punctuated, + }), + + /// Unnamed fields of a tuple struct or tuple variant such as `Some(T)`. + /// + /// *This type is available if Syn is built with the `"derive"` or + /// `"full"` feature.* + pub Unnamed(FieldsUnnamed { + pub paren_token: token::Paren, + pub unnamed: Punctuated, + }), + + /// Unit struct or unit variant such as `None`. + pub Unit, + } +} + +impl Fields { + /// Get an iterator over the borrowed [`Field`] items in this object. This + /// iterator can be used to iterate over a named or unnamed struct or + /// variant's fields uniformly. + /// + /// [`Field`]: struct.Field.html + pub fn iter(&self) -> punctuated::Iter { + match *self { + Fields::Unit => private::empty_punctuated_iter(), + Fields::Named(ref f) => f.named.iter(), + Fields::Unnamed(ref f) => f.unnamed.iter(), + } + } + + /// Get an iterator over the mutably borrowed [`Field`] items in this + /// object. This iterator can be used to iterate over a named or unnamed + /// struct or variant's fields uniformly. + /// + /// [`Field`]: struct.Field.html + pub fn iter_mut(&mut self) -> punctuated::IterMut { + match *self { + Fields::Unit => private::empty_punctuated_iter_mut(), + Fields::Named(ref mut f) => f.named.iter_mut(), + Fields::Unnamed(ref mut f) => f.unnamed.iter_mut(), + } + } +} + +impl<'a> IntoIterator for &'a Fields { + type Item = &'a Field; + type IntoIter = punctuated::Iter<'a, Field>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a> IntoIterator for &'a mut Fields { + type Item = &'a mut Field; + type IntoIter = punctuated::IterMut<'a, Field>; + + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +ast_struct! { + /// A field of a struct or enum variant. + /// + /// *This type is available if Syn is built with the `"derive"` or `"full"` + /// feature.* + pub struct Field { + /// Attributes tagged on the field. 
+ pub attrs: Vec, + + /// Visibility of the field. + pub vis: Visibility, + + /// Name of the field, if any. + /// + /// Fields of tuple structs have no names. + pub ident: Option, + + pub colon_token: Option, + + /// Type of the field. + pub ty: Type, + } +} + +ast_enum_of_structs! { + /// The visibility level of an item: inherited or `pub` or + /// `pub(restricted)`. + /// + /// *This type is available if Syn is built with the `"derive"` or `"full"` + /// feature.* + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. + /// + /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums + pub enum Visibility { + /// A public visibility level: `pub`. + /// + /// *This type is available if Syn is built with the `"derive"` or + /// `"full"` feature.* + pub Public(VisPublic { + pub pub_token: Token![pub], + }), + + /// A crate-level visibility: `crate`. + /// + /// *This type is available if Syn is built with the `"derive"` or + /// `"full"` feature.* + pub Crate(VisCrate { + pub crate_token: Token![crate], + }), + + /// A visibility level restricted to some path: `pub(self)` or + /// `pub(super)` or `pub(crate)` or `pub(in some::module)`. + /// + /// *This type is available if Syn is built with the `"derive"` or + /// `"full"` feature.* + pub Restricted(VisRestricted { + pub pub_token: Token![pub], + pub paren_token: token::Paren, + pub in_token: Option, + pub path: Box, + }), + + /// An inherited visibility, which usually means private. + pub Inherited, + } +} + +#[cfg(feature = "parsing")] +pub mod parsing { + use super::*; + + use ext::IdentExt; + use parse::{Parse, ParseStream, Result}; + + impl Parse for Variant { + fn parse(input: ParseStream) -> Result { + Ok(Variant { + attrs: input.call(Attribute::parse_outer)?, + ident: input.parse()?, + fields: { + if input.peek(token::Brace) { + Fields::Named(input.parse()?) + } else if input.peek(token::Paren) { + Fields::Unnamed(input.parse()?) + } else { + Fields::Unit + } + }, + discriminant: { + if input.peek(Token![=]) { + let eq_token: Token![=] = input.parse()?; + let discriminant: Expr = input.parse()?; + Some((eq_token, discriminant)) + } else { + None + } + }, + }) + } + } + + impl Parse for FieldsNamed { + fn parse(input: ParseStream) -> Result { + let content; + Ok(FieldsNamed { + brace_token: braced!(content in input), + named: content.parse_terminated(Field::parse_named)?, + }) + } + } + + impl Parse for FieldsUnnamed { + fn parse(input: ParseStream) -> Result { + let content; + Ok(FieldsUnnamed { + paren_token: parenthesized!(content in input), + unnamed: content.parse_terminated(Field::parse_unnamed)?, + }) + } + } + + impl Field { + /// Parses a named (braced struct) field. + pub fn parse_named(input: ParseStream) -> Result { + Ok(Field { + attrs: input.call(Attribute::parse_outer)?, + vis: input.parse()?, + ident: Some(input.parse()?), + colon_token: Some(input.parse()?), + ty: input.parse()?, + }) + } + + /// Parses an unnamed (tuple struct) field. 
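    ///
    /// Unlike [`parse_named`](#method.parse_named) this expects no
    /// identifier or colon, as in the positional fields of
    /// `struct Pair(u8, u8);`.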
+ pub fn parse_unnamed(input: ParseStream) -> Result { + Ok(Field { + attrs: input.call(Attribute::parse_outer)?, + vis: input.parse()?, + ident: None, + colon_token: None, + ty: input.parse()?, + }) + } + } + + impl Parse for Visibility { + fn parse(input: ParseStream) -> Result { + if input.peek(Token![pub]) { + Self::parse_pub(input) + } else if input.peek(Token![crate]) { + Self::parse_crate(input) + } else { + Ok(Visibility::Inherited) + } + } + } + + impl Visibility { + fn parse_pub(input: ParseStream) -> Result { + let pub_token = input.parse::()?; + + if input.peek(token::Paren) { + let ahead = input.fork(); + let mut content; + parenthesized!(content in ahead); + + if content.peek(Token![crate]) + || content.peek(Token![self]) + || content.peek(Token![super]) + { + return Ok(Visibility::Restricted(VisRestricted { + pub_token: pub_token, + paren_token: parenthesized!(content in input), + in_token: None, + path: Box::new(Path::from(content.call(Ident::parse_any)?)), + })); + } else if content.peek(Token![in]) { + return Ok(Visibility::Restricted(VisRestricted { + pub_token: pub_token, + paren_token: parenthesized!(content in input), + in_token: Some(content.parse()?), + path: Box::new(content.call(Path::parse_mod_style)?), + })); + } + } + + Ok(Visibility::Public(VisPublic { + pub_token: pub_token, + })) + } + + fn parse_crate(input: ParseStream) -> Result { + if input.peek2(Token![::]) { + Ok(Visibility::Inherited) + } else { + Ok(Visibility::Crate(VisCrate { + crate_token: input.parse()?, + })) + } + } + } +} + +#[cfg(feature = "printing")] +mod printing { + use super::*; + + use proc_macro2::TokenStream; + use quote::{ToTokens, TokenStreamExt}; + + use print::TokensOrDefault; + + impl ToTokens for Variant { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(&self.attrs); + self.ident.to_tokens(tokens); + self.fields.to_tokens(tokens); + if let Some((ref eq_token, ref disc)) = self.discriminant { + eq_token.to_tokens(tokens); + disc.to_tokens(tokens); + } + } + } + + impl ToTokens for FieldsNamed { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.brace_token.surround(tokens, |tokens| { + self.named.to_tokens(tokens); + }); + } + } + + impl ToTokens for FieldsUnnamed { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.paren_token.surround(tokens, |tokens| { + self.unnamed.to_tokens(tokens); + }); + } + } + + impl ToTokens for Field { + fn to_tokens(&self, tokens: &mut TokenStream) { + tokens.append_all(&self.attrs); + self.vis.to_tokens(tokens); + if let Some(ref ident) = self.ident { + ident.to_tokens(tokens); + TokensOrDefault(&self.colon_token).to_tokens(tokens); + } + self.ty.to_tokens(tokens); + } + } + + impl ToTokens for VisPublic { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.pub_token.to_tokens(tokens) + } + } + + impl ToTokens for VisCrate { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.crate_token.to_tokens(tokens); + } + } + + impl ToTokens for VisRestricted { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.pub_token.to_tokens(tokens); + self.paren_token.surround(tokens, |tokens| { + // TODO: If we have a path which is not "self" or "super" or + // "crate", automatically add the "in" token. + self.in_token.to_tokens(tokens); + self.path.to_tokens(tokens); + }); + } + } +} --- /dev/null +++ b/vendor/syn-0.15.7/src/derive.rs @@ -0,0 +1,263 @@ +// Copyright 2018 Syn Developers +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +use super::*; +use punctuated::Punctuated; + +ast_struct! { + /// Data structure sent to a `proc_macro_derive` macro. + /// + /// *This type is available if Syn is built with the `"derive"` feature.* + pub struct DeriveInput { + /// Attributes tagged on the whole struct or enum. + pub attrs: Vec, + + /// Visibility of the struct or enum. + pub vis: Visibility, + + /// Name of the struct or enum. + pub ident: Ident, + + /// Generics required to complete the definition. + pub generics: Generics, + + /// Data within the struct or enum. + pub data: Data, + } +} + +ast_enum_of_structs! { + /// The storage of a struct, enum or union data structure. + /// + /// *This type is available if Syn is built with the `"derive"` feature.* + /// + /// # Syntax tree enum + /// + /// This type is a [syntax tree enum]. + /// + /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums + pub enum Data { + /// A struct input to a `proc_macro_derive` macro. + /// + /// *This type is available if Syn is built with the `"derive"` + /// feature.* + pub Struct(DataStruct { + pub struct_token: Token![struct], + pub fields: Fields, + pub semi_token: Option, + }), + + /// An enum input to a `proc_macro_derive` macro. + /// + /// *This type is available if Syn is built with the `"derive"` + /// feature.* + pub Enum(DataEnum { + pub enum_token: Token![enum], + pub brace_token: token::Brace, + pub variants: Punctuated, + }), + + /// A tagged union input to a `proc_macro_derive` macro. + /// + /// *This type is available if Syn is built with the `"derive"` + /// feature.* + pub Union(DataUnion { + pub union_token: Token![union], + pub fields: FieldsNamed, + }), + } + + do_not_generate_to_tokens +} + +#[cfg(feature = "parsing")] +pub mod parsing { + use super::*; + + use parse::{Parse, ParseStream, Result}; + + impl Parse for DeriveInput { + fn parse(input: ParseStream) -> Result { + let attrs = input.call(Attribute::parse_outer)?; + let vis = input.parse::()?; + + let lookahead = input.lookahead1(); + if lookahead.peek(Token![struct]) { + let struct_token = input.parse::()?; + let ident = input.parse::()?; + let generics = input.parse::()?; + let (where_clause, fields, semi) = data_struct(input)?; + Ok(DeriveInput { + attrs: attrs, + vis: vis, + ident: ident, + generics: Generics { + where_clause: where_clause, + ..generics + }, + data: Data::Struct(DataStruct { + struct_token: struct_token, + fields: fields, + semi_token: semi, + }), + }) + } else if lookahead.peek(Token![enum]) { + let enum_token = input.parse::()?; + let ident = input.parse::()?; + let generics = input.parse::()?; + let (where_clause, brace, variants) = data_enum(input)?; + Ok(DeriveInput { + attrs: attrs, + vis: vis, + ident: ident, + generics: Generics { + where_clause: where_clause, + ..generics + }, + data: Data::Enum(DataEnum { + enum_token: enum_token, + brace_token: brace, + variants: variants, + }), + }) + } else if lookahead.peek(Token![union]) { + let union_token = input.parse::()?; + let ident = input.parse::()?; + let generics = input.parse::()?; + let (where_clause, fields) = data_union(input)?; + Ok(DeriveInput { + attrs: attrs, + vis: vis, + ident: ident, + generics: Generics { + where_clause: where_clause, + ..generics + }, + data: Data::Union(DataUnion { + union_token: union_token, + fields: fields, + }), + }) + } else { + Err(lookahead.error()) + } + } + } + + pub fn data_struct( + input: ParseStream, + ) -> Result<(Option, Fields, Option)> { + 
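        // Three body shapes are accepted here:
        //   `struct S { ... }`, optionally preceded by a where clause (named);
        //   `struct S(...);`, whose where clause may only appear *after* the
        //   parenthesized fields (tuple);
        //   `struct S;`, optionally preceded by a where clause (unit).
        // The lookahead is re-armed after each optional piece so that the
        // final `lookahead.error()` names every token acceptable at that
        // point.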
+        let mut lookahead = input.lookahead1();
+        let mut where_clause = None;
+        if lookahead.peek(Token![where]) {
+            where_clause = Some(input.parse()?);
+            lookahead = input.lookahead1();
+        }
+
+        if where_clause.is_none() && lookahead.peek(token::Paren) {
+            let fields = input.parse()?;
+
+            lookahead = input.lookahead1();
+            if lookahead.peek(Token![where]) {
+                where_clause = Some(input.parse()?);
+                lookahead = input.lookahead1();
+            }
+
+            if lookahead.peek(Token![;]) {
+                let semi = input.parse()?;
+                Ok((where_clause, Fields::Unnamed(fields), Some(semi)))
+            } else {
+                Err(lookahead.error())
+            }
+        } else if lookahead.peek(token::Brace) {
+            let fields = input.parse()?;
+            Ok((where_clause, Fields::Named(fields), None))
+        } else if lookahead.peek(Token![;]) {
+            let semi = input.parse()?;
+            Ok((where_clause, Fields::Unit, Some(semi)))
+        } else {
+            Err(lookahead.error())
+        }
+    }
+
+    pub fn data_enum(
+        input: ParseStream,
+    ) -> Result<(
+        Option<WhereClause>,
+        token::Brace,
+        Punctuated<Variant, Token![,]>,
+    )> {
+        let where_clause = input.parse()?;
+
+        let content;
+        let brace = braced!(content in input);
+        let variants = content.parse_terminated(Variant::parse)?;
+
+        Ok((where_clause, brace, variants))
+    }
+
+    pub fn data_union(input: ParseStream) -> Result<(Option<WhereClause>, FieldsNamed)> {
+        let where_clause = input.parse()?;
+        let fields = input.parse()?;
+        Ok((where_clause, fields))
+    }
+}
+
+#[cfg(feature = "printing")]
+mod printing {
+    use super::*;
+
+    use proc_macro2::TokenStream;
+    use quote::ToTokens;
+
+    use attr::FilterAttrs;
+    use print::TokensOrDefault;
+
+    impl ToTokens for DeriveInput {
+        fn to_tokens(&self, tokens: &mut TokenStream) {
+            for attr in self.attrs.outer() {
+                attr.to_tokens(tokens);
+            }
+            self.vis.to_tokens(tokens);
+            match self.data {
+                Data::Struct(ref d) => d.struct_token.to_tokens(tokens),
+                Data::Enum(ref d) => d.enum_token.to_tokens(tokens),
+                Data::Union(ref d) => d.union_token.to_tokens(tokens),
+            }
+            self.ident.to_tokens(tokens);
+            self.generics.to_tokens(tokens);
+            match self.data {
+                Data::Struct(ref data) => match data.fields {
+                    Fields::Named(ref fields) => {
+                        self.generics.where_clause.to_tokens(tokens);
+                        fields.to_tokens(tokens);
+                    }
+                    Fields::Unnamed(ref fields) => {
+                        fields.to_tokens(tokens);
+                        self.generics.where_clause.to_tokens(tokens);
+                        TokensOrDefault(&data.semi_token).to_tokens(tokens);
+                    }
+                    Fields::Unit => {
+                        self.generics.where_clause.to_tokens(tokens);
+                        TokensOrDefault(&data.semi_token).to_tokens(tokens);
+                    }
+                },
+                Data::Enum(ref data) => {
+                    self.generics.where_clause.to_tokens(tokens);
+                    data.brace_token.surround(tokens, |tokens| {
+                        data.variants.to_tokens(tokens);
+                    });
+                }
+                Data::Union(ref data) => {
+                    self.generics.where_clause.to_tokens(tokens);
+                    data.fields.to_tokens(tokens);
+                }
+            }
+        }
+    }
+}
--- /dev/null
+++ b/vendor/syn-0.15.7/src/error.rs
@@ -0,0 +1,137 @@
+// Copyright 2018 Syn Developers
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std;
+use std::fmt::{self, Display};
+use std::iter::FromIterator;
+
+use proc_macro2::{
+    Delimiter, Group, Ident, LexError, Literal, Punct, Spacing, Span, TokenStream, TokenTree,
+};
+
+use buffer::Cursor;
+
+/// The result of a Syn parser.
+pub type Result<T> = std::result::Result<T, Error>;
+
+/// Error returned when a Syn parser cannot parse the input tokens.
+///
+/// Refer to the [module documentation] for details about parsing in Syn.
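+///
+/// For example, a hand-written parser returns this module's `Result` type
+/// and propagates errors with `?`. A minimal sketch (`parse_pair` is an
+/// illustrative name, not part of the API):
+///
+/// ```
+/// extern crate syn;
+///
+/// use syn::Ident;
+/// use syn::parse::{ParseStream, Result};
+///
+/// // Any failure from either `parse` call bubbles up as a `syn::Error`.
+/// fn parse_pair(input: ParseStream) -> Result<(Ident, Ident)> {
+///     Ok((input.parse()?, input.parse()?))
+/// }
+/// #
+/// # fn main() {}
+/// ```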
+///
+/// [module documentation]: index.html
+///
+/// *This type is available if Syn is built with the `"parsing"` feature.*
+#[derive(Debug, Clone)]
+pub struct Error {
+    span: Span,
+    message: String,
+}
+
+impl Error {
+    /// Usually the [`ParseStream::error`] method will be used instead, which
+    /// automatically uses the correct span from the current position of the
+    /// parse stream.
+    ///
+    /// Use `Error::new` when the error needs to be triggered on some span
+    /// other than where the parse stream is currently positioned.
+    ///
+    /// [`ParseStream::error`]: struct.ParseBuffer.html#method.error
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// #[macro_use]
+    /// extern crate syn;
+    ///
+    /// use syn::{Ident, LitStr};
+    /// use syn::parse::{Error, ParseStream, Result};
+    ///
+    /// // Parses input that looks like `name = "string"` where the key must be
+    /// // the identifier `name` and the value may be any string literal.
+    /// // Returns the string literal.
+    /// fn parse_name(input: ParseStream) -> Result<LitStr> {
+    ///     let name_token: Ident = input.parse()?;
+    ///     if name_token != "name" {
+    ///         // Trigger an error not on the current position of the stream,
+    ///         // but on the position of the unexpected identifier.
+    ///         return Err(Error::new(name_token.span(), "expected `name`"));
+    ///     }
+    ///     input.parse::<Token![=]>()?;
+    ///     let s: LitStr = input.parse()?;
+    ///     Ok(s)
+    /// }
+    /// #
+    /// # fn main() {}
+    /// ```
+    pub fn new<T: Display>(span: Span, message: T) -> Self {
+        Error {
+            span: span,
+            message: message.to_string(),
+        }
+    }
+
+    pub fn span(&self) -> Span {
+        self.span
+    }
+
+    /// Render the error as an invocation of [`compile_error!`].
+    ///
+    /// The [`parse_macro_input!`] macro provides a convenient way to invoke
+    /// this method correctly in a procedural macro.
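+    ///
+    /// Without that macro, the same pattern can be written by hand. A minimal
+    /// sketch (`expand` stands in for whatever fallible expansion function the
+    /// macro uses; it is an illustrative name):
+    ///
+    /// ```ignore
+    /// match expand(input) {
+    ///     Ok(tokens) => tokens.into(),
+    ///     // On failure, emit `compile_error!` at the error's span.
+    ///     Err(err) => err.to_compile_error().into(),
+    /// }
+    /// ```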
+    ///
+    /// [`compile_error!`]: https://doc.rust-lang.org/std/macro.compile_error.html
+    /// [`parse_macro_input!`]: ../macro.parse_macro_input.html
+    pub fn to_compile_error(&self) -> TokenStream {
+        // compile_error!($message)
+        TokenStream::from_iter(vec![
+            TokenTree::Ident(Ident::new("compile_error", self.span)),
+            TokenTree::Punct({
+                let mut punct = Punct::new('!', Spacing::Alone);
+                punct.set_span(self.span);
+                punct
+            }),
+            TokenTree::Group({
+                let mut group = Group::new(Delimiter::Brace, {
+                    TokenStream::from_iter(vec![TokenTree::Literal({
+                        let mut string = Literal::string(&self.message);
+                        string.set_span(self.span);
+                        string
+                    })])
+                });
+                group.set_span(self.span);
+                group
+            }),
+        ])
+    }
+}
+
+pub fn new_at<T: Display>(scope: Span, cursor: Cursor, message: T) -> Error {
+    if cursor.eof() {
+        Error::new(scope, format!("unexpected end of input, {}", message))
+    } else {
+        Error::new(cursor.span(), message)
+    }
+}
+
+impl Display for Error {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        formatter.write_str(&self.message)
+    }
+}
+
+impl std::error::Error for Error {
+    fn description(&self) -> &str {
+        "parse error"
+    }
+}
+
+impl From<LexError> for Error {
+    fn from(err: LexError) -> Self {
+        Error::new(Span::call_site(), format!("{:?}", err))
+    }
+}
--- /dev/null
+++ b/vendor/syn-0.15.7/src/export.rs
@@ -0,0 +1,32 @@
+pub use std::clone::Clone;
+pub use std::cmp::{Eq, PartialEq};
+pub use std::convert::From;
+pub use std::default::Default;
+pub use std::fmt::{self, Debug, Formatter};
+pub use std::hash::{Hash, Hasher};
+pub use std::marker::Copy;
+pub use std::option::Option::{None, Some};
+pub use std::result::Result::{Err, Ok};
+
+pub use proc_macro2::{Span, TokenStream as TokenStream2};
+
+pub use span::IntoSpans;
+
+#[cfg(all(
+    not(all(target_arch = "wasm32", target_os = "unknown")),
+    feature = "proc-macro"
+))]
+pub use proc_macro::TokenStream;
+
+#[cfg(feature = "printing")]
+pub use quote::{ToTokens, TokenStreamExt};
+
+#[allow(non_camel_case_types)]
+pub type bool = help::Bool;
+#[allow(non_camel_case_types)]
+pub type str = help::Str;
+
+mod help {
+    pub type Bool = bool;
+    pub type Str = str;
+}
--- /dev/null
+++ b/vendor/syn-0.15.7/src/expr.rs
@@ -0,0 +1,3774 @@
+// Copyright 2018 Syn Developers
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::*;
+use proc_macro2::{Span, TokenStream};
+use punctuated::Punctuated;
+#[cfg(feature = "extra-traits")]
+use std::hash::{Hash, Hasher};
+#[cfg(all(feature = "parsing", feature = "full"))]
+use std::mem;
+#[cfg(feature = "extra-traits")]
+use tt::TokenStreamHelper;
+
+ast_enum_of_structs! {
+    /// A Rust expression.
+    ///
+    /// *This type is available if Syn is built with the `"derive"` or `"full"`
+    /// feature.*
+    ///
+    /// # Syntax tree enums
+    ///
+    /// This type is a syntax tree enum. In Syn this and other syntax tree
+    /// enums are designed to be traversed using the following rebinding idiom.
+    ///
+    /// ```
+    /// # use syn::Expr;
+    /// #
+    /// # fn example(expr: Expr) {
+    /// # const IGNORE: &str = stringify! {
+    /// let expr: Expr = /* ... */;
+    /// # };
+    /// match expr {
+    ///     Expr::MethodCall(expr) => {
+    ///         /* ... */
+    ///     }
+    ///     Expr::Cast(expr) => {
+    ///         /* ... */
+    ///     }
+    ///     Expr::If(expr) => {
+    ///         /* ... */
+    ///     }
+    ///     /* ... */
+    ///     # _ => {}
+    /// }
+    /// # }
+    /// ```
+    ///
+    /// We begin with a variable `expr` of type `Expr` that has no fields
+    /// (because it is an enum), and by matching on it and rebinding a
+    /// variable with the same name `expr` we effectively imbue our variable
+    /// with all of the data fields provided by the variant that it turned
+    /// out to be. So for example above if we ended up in the `MethodCall`
+    /// case then we get to use `expr.receiver`, `expr.args` etc; if we ended
+    /// up in the `If` case we get to use `expr.cond`, `expr.then_branch`,
+    /// `expr.else_branch`.
+    ///
+    /// The pattern is similar if the input expression is borrowed:
+    ///
+    /// ```
+    /// # use syn::Expr;
+    /// #
+    /// # fn example(expr: &Expr) {
+    /// match *expr {
+    ///     Expr::MethodCall(ref expr) => {
+    /// #   }
+    /// #   _ => {}
+    /// # }
+    /// # }
+    /// ```
+    ///
+    /// This approach avoids repeating the variant names twice on every line.
+    ///
+    /// ```
+    /// # use syn::{Expr, ExprMethodCall};
+    /// #
+    /// # fn example(expr: Expr) {
+    /// # match expr {
+    /// Expr::MethodCall(ExprMethodCall { method, args, .. }) => { // repetitive
+    /// # }
+    /// # _ => {}
+    /// # }
+    /// # }
+    /// ```
+    ///
+    /// In general, the name to which a syntax tree enum variant is bound
+    /// should be a suitable name for the complete syntax tree enum type.
+    ///
+    /// ```
+    /// # use syn::{Expr, ExprField};
+    /// #
+    /// # fn example(discriminant: &ExprField) {
+    /// // Binding is called `base` which is the name I would use if I were
+    /// // assigning `*discriminant.base` without an `if let`.
+    /// if let Expr::Tuple(ref base) = *discriminant.base {
+    /// # }
+    /// # }
+    /// ```
+    ///
+    /// A sign that you may not be choosing the right variable names is if you
+    /// see names getting repeated in your code, like accessing
+    /// `receiver.receiver` or `pat.pat` or `cond.cond`.
+    pub enum Expr {
+        /// A box expression: `box f`.
+        ///
+        /// *This type is available if Syn is built with the `"full"` feature.*
+        pub Box(ExprBox #full {
+            pub attrs: Vec<Attribute>,
+            pub box_token: Token![box],
+            pub expr: Box<Expr>,
+        }),
+
+        /// A placement expression: `place <- value`.
+        ///
+        /// *This type is available if Syn is built with the `"full"` feature.*
+        pub InPlace(ExprInPlace #full {
+            pub attrs: Vec<Attribute>,
+            pub place: Box<Expr>,
+            pub arrow_token: Token![<-],
+            pub value: Box<Expr>,
+        }),
+
+        /// A slice literal expression: `[a, b, c, d]`.
+        ///
+        /// *This type is available if Syn is built with the `"full"` feature.*
+        pub Array(ExprArray #full {
+            pub attrs: Vec<Attribute>,
+            pub bracket_token: token::Bracket,
+            pub elems: Punctuated<Expr, Token![,]>,
+        }),
+
+        /// A function call expression: `invoke(a, b)`.
+        ///
+        /// *This type is available if Syn is built with the `"derive"` or
+        /// `"full"` feature.*
+        pub Call(ExprCall {
+            pub attrs: Vec<Attribute>,
+            pub func: Box<Expr>,
+            pub paren_token: token::Paren,
+            pub args: Punctuated<Expr, Token![,]>,
+        }),
+
+        /// A method call expression: `x.foo::<T>(a, b)`.
+        ///
+        /// *This type is available if Syn is built with the `"full"` feature.*
+        pub MethodCall(ExprMethodCall #full {
+            pub attrs: Vec<Attribute>,
+            pub receiver: Box<Expr>,
+            pub dot_token: Token![.],
+            pub method: Ident,
+            pub turbofish: Option<MethodTurbofish>,
+            pub paren_token: token::Paren,
+            pub args: Punctuated<Expr, Token![,]>,
+        }),
+
+        /// A tuple expression: `(a, b, c, d)`.
+        ///
+        /// *This type is available if Syn is built with the `"full"` feature.*
+        pub Tuple(ExprTuple #full {
+            pub attrs: Vec<Attribute>,
+            pub paren_token: token::Paren,
+            pub elems: Punctuated<Expr, Token![,]>,
+        }),
+
+        /// A binary operation: `a + b`, `a * b`.
+        ///
+        /// *This type is available if Syn is built with the `"derive"` or
+        /// `"full"` feature.*
+        pub Binary(ExprBinary {
+            pub attrs: Vec<Attribute>,
+            pub left: Box<Expr>,
+            pub op: BinOp,
+            pub right: Box<Expr>,
+        }),
+
+        /// A unary operation: `!x`, `*x`.
+        ///
+        /// *This type is available if Syn is built with the `"derive"` or
+        /// `"full"` feature.*
+        pub Unary(ExprUnary {
+            pub attrs: Vec<Attribute>,
+            pub op: UnOp,
+            pub expr: Box<Expr>,
+        }),
+
+        /// A literal in place of an expression: `1`, `"foo"`.
+        ///
+        /// *This type is available if Syn is built with the `"derive"` or
+        /// `"full"` feature.*
+        pub Lit(ExprLit {
+            pub attrs: Vec<Attribute>,
+            pub lit: Lit,
+        }),
+
+        /// A cast expression: `foo as f64`.
+        ///
+        /// *This type is available if Syn is built with the `"derive"` or
+        /// `"full"` feature.*
+        pub Cast(ExprCast {
+            pub attrs: Vec<Attribute>,
+            pub expr: Box<Expr>,
+            pub as_token: Token![as],
+            pub ty: Box<Type>,
+        }),
+
+        /// A type ascription expression: `foo: f64`.
+        ///
+        /// *This type is available if Syn is built with the `"full"` feature.*
+        pub Type(ExprType #full {
+            pub attrs: Vec<Attribute>,
+            pub expr: Box<Expr>,
+            pub colon_token: Token![:],
+            pub ty: Box<Type>,
+        }),
+
+        /// A `let` guard: `let Some(x) = opt`.
+        ///
+        /// *This type is available if Syn is built with the `"full"` feature.*
+        pub Let(ExprLet #full {
+            pub attrs: Vec<Attribute>,
+            pub let_token: Token![let],
+            pub pats: Punctuated<Pat, Token![|]>,
+            pub eq_token: Token![=],
+            pub expr: Box<Expr>,
+        }),
+
+        /// An `if` expression with an optional `else` block: `if expr { ... }
+        /// else { ... }`.
+        ///
+        /// The `else` branch expression may only be an `If` or `Block`
+        /// expression, not any of the other types of expression.
+        ///
+        /// *This type is available if Syn is built with the `"full"` feature.*
+        pub If(ExprIf #full {
+            pub attrs: Vec<Attribute>,
+            pub if_token: Token![if],
+            pub cond: Box<Expr>,
+            pub then_branch: Block,
+            pub else_branch: Option<(Token![else], Box<Expr>)>,
+        }),
+
+        /// A while loop: `while expr { ... }`.
+        ///
+        /// *This type is available if Syn is built with the `"full"` feature.*
+        pub While(ExprWhile #full {
+            pub attrs: Vec<Attribute>,
+            pub label: Option